diff --git a/OpenOOD/.gitignore b/OpenOOD/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..216d8d0a58840c3c1dd294e6fb76c24bd776b80e --- /dev/null +++ b/OpenOOD/.gitignore @@ -0,0 +1,169 @@ +# ignore some temp/test files +_test_* +*-backup* + +# ignore data and output directory +data +data/ +results/ +checkpoints/ +ipynb_checkpoints/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +.vs/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/ +OpenOOD.wiki/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# vscode debug +.vscode/ + +# macos files +.DS_Store + +# check format +.isort.cfg + +# no jupyter notebook +*.ipynb_checkpoints +*.ipynb + +# ignore custom config and scripts +config/*/_*/ +scripts/_*/ +tools/mytools/ + +# ignore pretrained bit model +bit_pretrained_models/ +group_config/ + +# local dev +local/ +*legacy* diff --git a/OpenOOD/.pre-commit-config.yaml b/OpenOOD/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e7ed56abe4363b10cc4a3e64fc20b877a6472940 --- /dev/null +++ b/OpenOOD/.pre-commit-config.yaml @@ -0,0 +1,32 @@ +exclude: ^tests/data/ +repos: + - repo: https://github.com/PyCQA/flake8.git + rev: 3.8.3 + hooks: + - id: flake8 + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.30.0 + hooks: + - id: yapf + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.1.0 + hooks: + - id: trailing-whitespace + - id: check-yaml + - id: end-of-file-fixer + - id: double-quote-string-fixer + - id: check-merge-conflict + - id: fix-encoding-pragma + args: ["--remove"] + - id: mixed-line-ending + args: ["--fix=lf"] + - repo: https://github.com/codespell-project/codespell + rev: v2.1.0 + hooks: + - id: codespell + args: ["--ignore-words=codespell_ignored.txt"] + - repo: https://github.com/myint/docformatter + rev: v1.3.1 + hooks: + - id: docformatter + args: ["--in-place", "--wrap-descriptions", "79"] diff --git a/OpenOOD/CODE_OF_CONDUCT.md b/OpenOOD/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..6784a8d16675b5010cdf149ae632c3ee6b2e791b --- /dev/null +++ b/OpenOOD/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +yangjingkang001@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/OpenOOD/CONTRIBUTING.md b/OpenOOD/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..1568a09285409b22052e428741e315520b8f91bc --- /dev/null +++ b/OpenOOD/CONTRIBUTING.md @@ -0,0 +1,74 @@ +## Contributing to OpenOOD + +All kinds of contributions are welcome, including but not limited to the following. + +- Integrate more methods under generalized OOD detection +- Fix typos or bugs +- Add new features and components + +### Workflow + +1. fork and pull the latest OpenOOD repository +2. checkout a new branch (do not use master branch for PRs) +3. commit your changes +4. create a PR + +```{note} +If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first. +``` +### Code style + +#### Python + +We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style. + +We use the following tools for linting and formatting: + +- [flake8](http://flake8.pycqa.org/en/latest/): A wrapper around some linter tools. +- [yapf](https://github.com/google/yapf): A formatter for Python files. +- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports. +- [markdownlint](https://github.com/markdownlint/markdownlint): A linter to check markdown files and flag style issues. +- [docformatter](https://github.com/myint/docformatter): A formatter for docstrings. + +Style configurations of yapf and isort can be found in [setup.cfg](./setup.cfg). + +We use a [pre-commit hook](https://pre-commit.com/) that checks and formats `flake8`, `yapf`, `isort`, `trailing whitespaces`, and `markdown files`, +fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, and `mixed-line-ending`, and sorts `requirements.txt` automatically on every commit. +The config for the pre-commit hook is stored in [.pre-commit-config](./.pre-commit-config.yaml). + +After you clone the repository, you will need to install and initialize the pre-commit hook. + +```shell +pip install -U pre-commit +``` + +Then, from the repository folder, run + +```shell +pre-commit install +```
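+ +If you want to run all the hooks against the whole codebase once (for example, right after installing them), `pre-commit` can also be invoked manually: + +```shell +pre-commit run --all-files +```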
+ +## Contributing to OpenOOD leaderboard + +We welcome new entries submitted to the leaderboard. Please follow the instructions below to submit your results. + +1. Evaluate your model/method with OpenOOD's benchmark and evaluator so that the comparison is fair. + +2. Report your new results by opening an issue. Remember to specify the following information: + +- **`Training`**: The training method of your model, e.g., `CrossEntropy`. +- **`Postprocessor`**: The postprocessor of your model, e.g., `MSP`, `ReAct`, etc. +- **`Near-OOD AUROC`**: The AUROC score of your model on the near-OOD split. +- **`Far-OOD AUROC`**: The AUROC score of your model on the far-OOD split. +- **`ID Accuracy`**: The accuracy of your model on the ID test data. +- **`Outlier Data`**: Whether your model uses outlier data for training. +- **`Model Arch.`**: The architecture of your base classifier, e.g., `ResNet18`. +- **`Additional Description`**: Any additional description of your model, e.g., `100 epochs`, `torchvision pretrained`, etc. + +3. Ideally, send us a copy of your model checkpoint so that we can verify your results on our end. You can either upload the checkpoint to cloud storage and share the link in the issue, or send us an email at [jz288@duke.edu](mailto:jz288@duke.edu). diff --git a/OpenOOD/LICENSE b/OpenOOD/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..606b40619e6b44313bb1c40d4e6c9c9f96623bd8 --- /dev/null +++ b/OpenOOD/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Jingkang Yang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/OpenOOD/README.md b/OpenOOD/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8751acf24f928a737cd5c829b85dd0ab33469fd8 --- /dev/null +++ b/OpenOOD/README.md @@ -0,0 +1,388 @@ +# OpenOOD: Benchmarking Generalized OOD Detection + + + +| :exclamation: When using OpenOOD in your research, it is vital to cite both the OpenOOD benchmark (versions 1 and 1.5) and the individual works that have contributed to your research. Accurate citation acknowledges the efforts and contributions of all researchers involved.
For example, if your work involves the NINCO benchmark within OpenOOD, please include a citation for NINCO in addition to OpenOOD.| +|-----------------------------------------| + + +[![paper](https://img.shields.io/badge/Paper-OpenReview%20(v1.0)-b31b1b?style=for-the-badge)](https://openreview.net/pdf?id=gT6j4_tskUt) +&nbsp;&nbsp;&nbsp;&nbsp; +[![paper](https://img.shields.io/badge/PAPER-arXiv%20(v1.5)-yellowgreen?style=for-the-badge)](https://arxiv.org/abs/2306.09301) +&nbsp;&nbsp;&nbsp;&nbsp; + + + +[![paper](https://img.shields.io/badge/leaderboard-35%2B%20Methods-228c22?style=for-the-badge)](https://zjysteven.github.io/OpenOOD/) +&nbsp;&nbsp;&nbsp;&nbsp; +[![paper](https://img.shields.io/badge/colab-tutorial-orange?style=for-the-badge)](https://colab.research.google.com/drive/1tvTpCM1_ju82Yygu40fy7Lc0L1YrlkQF?usp=sharing) +&nbsp;&nbsp;&nbsp;&nbsp; +[![paper](https://img.shields.io/badge/Forum-SLACK-797ef6?style=for-the-badge)](https://openood.slack.com/) + + + + + +This repository reproduces representative methods within the [`Generalized Out-of-Distribution Detection Framework`](https://arxiv.org/abs/2110.11334), +aiming to make a fair comparison across methods that were initially developed for anomaly detection, novelty detection, open set recognition, and out-of-distribution detection. +This codebase is still under construction. +Comments, issues, contributions, and collaborations are all welcome! + +| ![timeline.jpg](https://live.staticflickr.com/65535/52144751937_95282e7de3_k.jpg) | +|:--:| +| Timeline of the methods that OpenOOD supports. More methods are included as OpenOOD iterates.| + + +## Updates +- **27 Oct, 2023**: A short version of OpenOOD `v1.5` is accepted to [NeurIPS 2023 Workshop on Distribution Shifts](https://sites.google.com/view/distshift2023/home?authuser=0) as an oral presentation. You may want to check out our [presentation slides](https://drive.google.com/file/d/1qlLQxWpYqFMwjgAHayV_ly2MSGbQ8b18/view?usp=drive_link) and [video recording](https://youtu.be/l58qYmY9NVw). +- **25 Sept, 2023**: OpenOOD now supports OOD detection with foundation models including zero-shot CLIP and DINOv2 linear probe. Check out the example evaluation script [here](https://github.com/Jingkang50/OpenOOD/blob/main/scripts/eval_ood_imagenet_foundation_models.py). +- **16 June, 2023**: :boom::boom: We are releasing OpenOOD `v1.5`, which includes the following exciting updates. A detailed changelog is provided in the [Wiki](https://github.com/Jingkang50/OpenOOD/wiki/OpenOOD-v1.5-change-log). An overview of the supported methods and benchmarks (with paper links) is available [here](https://github.com/Jingkang50/OpenOOD/wiki/OpenOOD-v1.5-methods-&-benchmarks-overview). + - A new [report](https://arxiv.org/abs/2306.09301) which provides benchmarking results on ImageNet and for full-spectrum detection. + - A unified, easy-to-use evaluator that allows evaluation by simply creating an evaluator instance and calling its functions. Check out this [colab tutorial](https://colab.research.google.com/drive/1tvTpCM1_ju82Yygu40fy7Lc0L1YrlkQF?usp=sharing)! A minimal usage sketch is also included right after this list of updates. + - A live [leaderboard](https://zjysteven.github.io/OpenOOD/) that tracks the state of the art in this field. +- **14 October, 2022**: OpenOOD `v1.0` is accepted to NeurIPS 2022. Check the report [here](https://arxiv.org/abs/2210.07242). +- **14 June, 2022**: We release `v0.5`. +- **12 April, 2022**: Primary release to support [Full-Spectrum OOD Detection](https://arxiv.org/abs/2204.05306).
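+ +The unified evaluator mentioned in the `v1.5` update above is the quickest way to try OpenOOD. The following is a minimal sketch distilled from the [colab tutorial](https://colab.research.google.com/drive/1tvTpCM1_ju82Yygu40fy7Lc0L1YrlkQF?usp=sharing); the checkpoint path is a placeholder, and the tutorial always has the exact, up-to-date API: + +```python +import torch + +from openood.evaluation_api import Evaluator +from openood.networks import ResNet18_32x32 + +# load an ID classifier (the checkpoint path below is a placeholder) +net = ResNet18_32x32(num_classes=10) +net.load_state_dict(torch.load('./results/checkpoints/cifar10_resnet18.ckpt')) +net.cuda().eval() + +# the evaluator automatically downloads the benchmark data on first use +evaluator = Evaluator(net, id_name='cifar10', postprocessor_name='msp', batch_size=200) + +# reports ID accuracy together with near-/far-OOD AUROC, FPR@95, etc. +metrics = evaluator.eval_ood(fsood=False) +print(metrics) +```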
+ +## FAQ +- `APS_mode` means Automatic (hyper)Parameter Searching mode, which enables the model to validate all the hyperparameters in the sweep list based on the validation ID/OOD set. The default value is False. Check [here](https://github.com/Jingkang50/OpenOOD/blob/main/configs/postprocessors/dice.yml) for example. + + +## Get Started + +### v1.5 (up-to-date) +#### Installation +OpenOOD now supports installation via pip. +``` +pip install git+https://github.com/Jingkang50/OpenOOD +# optional, if you want to use CLIP +# pip install git+https://github.com/openai/CLIP.git +``` + +#### Data +If you only use our evaluator, the benchmarks for evaluation will be automatically downloaded by the evaluator (again check out this [tutorial](https://colab.research.google.com/drive/1tvTpCM1_ju82Yygu40fy7Lc0L1YrlkQF?usp=sharing)). If you would like to also use OpenOOD for training, you can get all data with our [downloading script](https://github.com/Jingkang50/OpenOOD/tree/main/scripts/download). Note that ImageNet-1K training images should be downloaded from its official website. + +#### Pre-trained checkpoints +OpenOOD v1.5 focuses on 4 ID datasets, and we release pre-trained models accordingly. +- CIFAR-10 [[Google Drive]](https://drive.google.com/file/d/1byGeYxM_PlLjT72wZsMQvP6popJeWBgt/view?usp=drive_link): ResNet-18 classifiers trained with cross-entropy loss from 3 training runs. +- CIFAR-100 [[Google Drive]](https://drive.google.com/file/d/1s-1oNrRtmA0pGefxXJOUVRYpaoAML0C-/view?usp=drive_link): ResNet-18 classifiers trained with cross-entropy loss from 3 training runs. +- ImageNet-200 [[Google Drive]](https://drive.google.com/file/d/1ddVmwc8zmzSjdLUO84EuV4Gz1c7vhIAs/view?usp=drive_link): ResNet-18 classifiers trained with cross-entropy loss from 3 training runs. +- ImageNet-1K [[Google Drive]](https://drive.google.com/file/d/15PdDMNRfnJ7f2oxW6lI-Ge4QJJH3Z0Fy/view?usp=drive_link): ResNet-50 classifiers including 1) the one from torchvision, 2) the ones that are trained by us with specific methods such as MOS, CIDER, and 3) the official checkpoints of data augmentation methods such as AugMix, PixMix. + +Again, these checkpoints can be downloaded with the downloading script [here](https://github.com/Jingkang50/OpenOOD/tree/main/scripts/download). + + +Our codebase accesses the datasets from `./data/` and pretrained models from `./results/checkpoints/` by default. +``` +├── ... +├── data +│ ├── benchmark_imglist +│ ├── images_classic +│ └── images_largescale +├── openood +├── results +│ ├── checkpoints +│ └── ... +├── scripts +├── main.py +├── ... +``` + +#### Training and evaluation scripts +We provide training and evaluation scripts for all the methods we support in [scripts folder](https://github.com/Jingkang50/OpenOOD/tree/main/scripts). + +--- +## Supported Benchmarks (10) +This part lists all the benchmarks we support. Feel free to include more. + + + +
+Anomaly Detection (1) + +> - [x] [MVTec-AD](https://www.mvtec.com/company/research/datasets/mvtec-ad) +
+ +
+Open Set Recognition (4) + +> - [x] [MNIST-4/6]() +> - [x] [CIFAR-4/6]() +> - [x] [CIFAR-40/60]() +> - [x] [TinyImageNet-20/180]() +
+ +
+Out-of-Distribution Detection (6) + +> - [x] [BIMCV (A COVID X-Ray Dataset)]() +> > Near-OOD: `CT-SCAN`, `X-Ray-Bone`;
+> > Far-OOD: `MNIST`, `CIFAR-10`, `Texture`, `Tiny-ImageNet`;
+> - [x] [MNIST]() +> > Near-OOD: `NotMNIST`, `FashionMNIST`;
+> > Far-OOD: `Texture`, `CIFAR-10`, `TinyImageNet`, `Places365`;
+> - [x] [CIFAR-10]() +> > Near-OOD: `CIFAR-100`, `TinyImageNet`;
+> > Far-OOD: `MNIST`, `SVHN`, `Texture`, `Places365`;
+> - [x] [CIFAR-100]() +> > Near-OOD: `CIFAR-10`, `TinyImageNet`;
+> > Far-OOD: `MNIST`, `SVHN`, `Texture`, `Places365`;
+> - [x] [ImageNet-200]() +> > Near-OOD: `SSB-hard`, `NINCO`;
+> > Far-OOD: `iNaturalist`, `Texture`, `OpenImage-O`;
+> > Covariate-Shifted ID: `ImageNet-C`, `ImageNet-R`, `ImageNet-v2`; +> - [x] [ImageNet-1K]() +> > Near-OOD: `SSB-hard`, `NINCO`;
+> > Far-OOD: `iNaturalist`, `Texture`, `OpenImage-O`;
+> > Covariate-Shifted ID: `ImageNet-C`, `ImageNet-R`, `ImageNet-v2`; +
+Note that OpenOOD v1.5 emphasizes the last 4 benchmarks for OOD detection. + +--- +## Supported Backbones (6) +This part lists all the backbones supported in our codebase, including CNN-based and Transformer-based models. Backbones like ResNet-50 and the Transformers come with ImageNet-1K/22K pretrained weights. + +
+CNN-based Backbones (4) + +> - [x] [LeNet-5](http://yann.lecun.com/exdb/lenet/) +> - [x] [ResNet-18](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html) +> - [x] [WideResNet-28](https://arxiv.org/abs/1605.07146) +> - [x] [ResNet-50](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html) ([BiT](https://github.com/google-research/big_transfer)) +
+ + +
+Transformer-based Architectures (2) + +> - [x] [ViT](https://github.com/google-research/vision_transformer) ([DeiT](https://github.com/facebookresearch/deit)) +> - [x] [Swin Transformer](https://openaccess.thecvf.com/content/ICCV2021/html/Liu_Swin_Transformer_Hierarchical_Vision_Transformer_Using_Shifted_Windows_ICCV_2021_paper.html) +
+ +--- +## Supported Methods (50+) +This part lists all the methods we include in this codebase. Up to `v1.5`, we support **more than 50 popular methods** for generalized OOD detection. + +All the supported methodologies can be placed into the following four categories. + +![density]&nbsp;&nbsp;&nbsp;![reconstruction]&nbsp;&nbsp;&nbsp;![classification]&nbsp;&nbsp;&nbsp;![distance] + +We also mark the supported methodologies with the following tags if they have special designs in the corresponding steps, compared to the standard classifier training process. + +![preprocess]&nbsp;&nbsp;&nbsp;![extradata]&nbsp;&nbsp;&nbsp;![training]&nbsp;&nbsp;&nbsp;![postprocess] + + 
+Anomaly Detection (5) + +> - [x] [![](https://img.shields.io/badge/ICML'18-Deep–SVDD-f4d5b3?style=for-the-badge)](https://github.com/lukasruff/Deep-SVDD-PyTorch) ![training] ![postprocess] +> - [x] [![](https://img.shields.io/badge/arXiv'20-KDAD-f4d5b3?style=for-the-badge)]() +![training] ![postprocess] +> - [x] [![](https://img.shields.io/badge/CVPR'21-CutPaste-d0e9ff?style=for-the-badge)](https://github.com/lukasruff/Deep-SVDD-PyTorch) +![training] ![postprocess] +> - [x] [![](https://img.shields.io/badge/arXiv'21-PatchCore-f4d5b3?style=for-the-badge)](https://github.com/lukasruff/Deep-SVDD-PyTorch) ![training] ![postprocess] +> - [x] [![](https://img.shields.io/badge/ICCV'21-DRÆM-c2e2de?style=for-the-badge)](https://github.com/lukasruff/Deep-SVDD-PyTorch) ![training] ![postprocess] +
+ + +
+Open Set Recognition (3) + +> Post-Hoc Methods (2): +> - [x] [![](https://img.shields.io/badge/CVPR'16-OpenMax-d0e9ff?style=for-the-badge)](https://github.com/13952522076/Open-Set-Recognition) ![postprocess] +> - [x] [![](https://img.shields.io/badge/ICCV'21-OpenGAN-fdd7e6?style=for-the-badge)](https://github.com/aimerykong/OpenGAN/tree/main/utils) ![postprocess] + +> Training Methods (1): +> - [x] [![](https://img.shields.io/badge/TPAMI'21-ARPL-f4d5b3?style=for-the-badge)](https://github.com/iCGY96/ARPL) ![training] ![postprocess] +
+ + +
+Out-of-Distribution Detection (22) + + + +> Post-Hoc Methods (13): +> - [x] [![msp](https://img.shields.io/badge/ICLR'17-MSP-fdd7e6?style=for-the-badge)](https://openreview.net/forum?id=Hkg4TI9xl) +> - [x] [![odin](https://img.shields.io/badge/ICLR'18-ODIN-fdd7e6?style=for-the-badge)](https://openreview.net/forum?id=H1VGkIxRZ)    ![postprocess] +> - [x] [![mds](https://img.shields.io/badge/NeurIPS'18-MDS-f4d5b3?style=for-the-badge)](https://papers.nips.cc/paper/2018/hash/abdeb6f575ac5c6676b747bca8d09cc2-Abstract.html)    ![postprocess] +> - [x] [![mdsensemble](https://img.shields.io/badge/NeurIPS'18-MDSEns-f4d5b3?style=for-the-badge)](https://papers.nips.cc/paper/2018/hash/abdeb6f575ac5c6676b747bca8d09cc2-Abstract.html)    ![postprocess] +> - [x] [![gram](https://img.shields.io/badge/ICML'20-Gram-f4d5b3?style=for-the-badge)](https://github.com/VectorInstitute/gram-ood-detection)    ![postprocess] +> - [x] [![ebo](https://img.shields.io/badge/NeurIPS'20-EBO-d0e9ff?style=for-the-badge)](https://github.com/wetliu/energy_ood)    ![postprocess] +> - [x] [![rmds](https://img.shields.io/badge/ARXIV'21-RMDS-f4d5b3?style=for-the-badge)](https://arxiv.org/abs/2106.09022)    ![postprocess] +> - [x] [![gradnorm](https://img.shields.io/badge/NeurIPS'21-GradNorm-fdd7e6?style=for-the-badge)](https://github.com/deeplearning-wisc/gradnorm_ood)    ![postprocess] +> - [x] [![react](https://img.shields.io/badge/NeurIPS'21-ReAct-fdd7e6?style=for-the-badge)](https://github.com/deeplearning-wisc/react)    ![postprocess] +> - [x] [![mls](https://img.shields.io/badge/ICML'22-MLS-fdd7e6?style=for-the-badge)](https://github.com/hendrycks/anomaly-seg)    ![postprocess] +> - [x] [![klm](https://img.shields.io/badge/ICML'22-KL–Matching-fdd7e6?style=for-the-badge)](https://github.com/hendrycks/anomaly-seg)    ![postprocess] +> - [x] [![sem](https://img.shields.io/badge/arXiv'22-SEM-fdd7e6?style=for-the-badge)]()    ![postprocess] +> - [x] [![vim](https://img.shields.io/badge/CVPR'22-VIM-fdd7e6?style=for-the-badge)](https://ooddetection.github.io/)    ![postprocess] +> - [x] [![knn](https://img.shields.io/badge/ICML'22-KNN-fdd7e6?style=for-the-badge)](https://github.com/deeplearning-wisc/knn-ood)    ![postprocess] +> - [x] [![dice](https://img.shields.io/badge/ECCV'22-DICE-d0e9ff?style=for-the-badge)](https://github.com/deeplearning-wisc/dice)    ![postprocess] +> - [x] [![rankfeat](https://img.shields.io/badge/NEURIPS'22-RANKFEAT-fdd7e6?style=for-the-badge)](https://github.com/KingJamesSong/RankFeat)    ![postprocess] +> - [x] [![ash](https://img.shields.io/badge/ICLR'23-ASH-fdd7e6?style=for-the-badge)](https://andrijazz.github.io/ash)    ![postprocess] +> - [x] [![she](https://img.shields.io/badge/ICLR'23-SHE-fdd7e6?style=for-the-badge)](https://github.com/zjs975584714/SHE)    ![postprocess] +> - [x] [![gen](https://img.shields.io/badge/CVPR'23-GEN-fdd7e6?style=for-the-badge)](https://openaccess.thecvf.com/content/CVPR2023/papers/Liu_GEN_Pushing_the_Limits_of_Softmax-Based_Out-of-Distribution_Detection_CVPR_2023_paper.pdf)    ![postprocess] +> - [x] [![nnguide](https://img.shields.io/badge/ICCV'23-NNGuide-fdd7e6?style=for-the-badge)](https://arxiv.org/abs/2309.14888)    ![postprocess] +> - [x] [![relation](https://img.shields.io/badge/NEURIPS'23-Relation-fdd7e6?style=for-the-badge)](https://arxiv.org/abs/2301.12321)    ![postprocess] +> - [x] [![scale](https://img.shields.io/badge/ICLR'24-Scale-fdd7e6?style=for-the-badge)](https://github.com/kai422/SCALE)    ![postprocess] + +> Training Methods (6): +> - [x] 
[![confbranch](https://img.shields.io/badge/arXiv'18-ConfBranch-fdd7e6?style=for-the-badge)](https://github.com/uoguelph-mlrg/confidence_estimation)    ![preprocess]   ![training] +> - [x] [![rotpred](https://img.shields.io/badge/neurips'19-RotPred-fdd7e6?style=for-the-badge)](https://github.com/hendrycks/ss-ood)    ![preprocess]   ![training] +> - [x] [![godin](https://img.shields.io/badge/CVPR'20-G–ODIN-fdd7e6?style=for-the-badge)](https://github.com/guyera/Generalized-ODIN-Implementation)    ![training]   ![postprocess] +> - [x] [![csi](https://img.shields.io/badge/NeurIPS'20-CSI-fdd7e6?style=for-the-badge)](https://github.com/alinlab/CSI)    ![preprocess]   ![training]   ![postprocess] +> - [x] [![ssd](https://img.shields.io/badge/ICLR'21-SSD-fdd7e6?style=for-the-badge)](https://github.com/inspire-group/SSD)    ![training]   ![postprocess] +> - [x] [![mos](https://img.shields.io/badge/CVPR'21-MOS-fdd7e6?style=for-the-badge)](https://github.com/deeplearning-wisc/large_scale_ood)    ![training] +> - [x] [![vos](https://img.shields.io/badge/ICLR'22-VOS-d0e9ff?style=for-the-badge)](https://github.com/deeplearning-wisc/vos)    ![training]   ![postprocess] +> - [x] [![logitnorm](https://img.shields.io/badge/ICML'22-LogitNorm-fdd7e6?style=for-the-badge)](https://github.com/hongxin001/logitnorm_ood)    ![training]   ![preprocess] +> - [x] [![cider](https://img.shields.io/badge/ICLR'23-CIDER-f4d5b3?style=for-the-badge)](https://github.com/deeplearning-wisc/cider)    ![training]   ![postprocess] +> - [x] [![npos](https://img.shields.io/badge/ICLR'23-NPOS-f4d5b3?style=for-the-badge)](https://github.com/deeplearning-wisc/npos)    ![training]   ![postprocess] +> - [x] [![t2fnorm](https://img.shields.io/badge/arXiv'23-T2FNorm-f4d5b3?style=for-the-badge)](https://arxiv.org/abs/2305.17797)    ![training] +> - [x] [![ish](https://img.shields.io/badge/ICLR'24-ish-fdd7e6?style=for-the-badge)](https://github.com/kai422/SCALE)    ![training] + + +> Training With Extra Data (3): +> - [x] [![oe](https://img.shields.io/badge/ICLR'19-OE-fdd7e6?style=for-the-badge)](https://openreview.net/forum?id=HyxCxhRcY7)    ![extradata]   ![training] +> - [x] [![mcd](https://img.shields.io/badge/ICCV'19-MCD-fdd7e6?style=for-the-badge)](https://openaccess.thecvf.com/content_ICCV_2019/papers/Yu_Unsupervised_Out-of-Distribution_Detection_by_Maximum_Classifier_Discrepancy_ICCV_2019_paper.pdf)    ![extradata]   ![training] +> - [x] [![udg](https://img.shields.io/badge/ICCV'21-UDG-fdd7e6?style=for-the-badge)](https://openaccess.thecvf.com/content/ICCV2021/html/Yang_Semantically_Coherent_Out-of-Distribution_Detection_ICCV_2021_paper.html)    ![extradata]   ![training] +> - [x] [![mixoe](https://img.shields.io/badge/WACV'23-MixOE-fdd7e6?style=for-the-badge)](https://openaccess.thecvf.com/content/WACV2023/html/Zhang_Mixture_Outlier_Exposure_Towards_Out-of-Distribution_Detection_in_Fine-Grained_Environments_WACV_2023_paper.html)    ![extradata]   ![training] +
+ + +
+Method Uncertainty (4) + +> - [x] [![mcdropout](https://img.shields.io/badge/ICML'16-MC–Dropout-fdd7e6?style=for-the-badge)]()    ![training]   ![postprocess] +> - [x] [![deepensemble](https://img.shields.io/badge/NeurIPS'17-Deep–Ensemble-fdd7e6?style=for-the-badge)]()    ![training] +> - [x] [![tempscale](https://img.shields.io/badge/ICML'17-Temp–Scaling-fdd7e6?style=for-the-badge)](https://proceedings.mlr.press/v70/guo17a.html)    ![postprocess] +> - [x] [![rts](https://img.shields.io/badge/AAAI'23-RTS-fdd7e6?style=for-the-badge)]()    ![training]   ![postprocess] +
+ + +
+Data Augmentation (8) + +> - [x] [![mixup](https://img.shields.io/badge/ICLR'18-Mixup-fdd7e6?style=for-the-badge)]()&nbsp;&nbsp;&nbsp;&nbsp;![preprocess] +> - [x] [![cutmix](https://img.shields.io/badge/ICCV'19-CutMix-fdd7e6?style=for-the-badge)]()&nbsp;&nbsp;&nbsp;&nbsp;![preprocess] +> - [x] [![styleaugment](https://img.shields.io/badge/ICLR'19-StyleAugment-fdd7e6?style=for-the-badge)](https://openreview.net/forum?id=Bygh9j09KX)&nbsp;&nbsp;&nbsp;&nbsp;![preprocess] +> - [x] [![randaugment](https://img.shields.io/badge/CVPRW'20-RandAugment-fdd7e6?style=for-the-badge)](https://openaccess.thecvf.com/content_CVPRW_2020/html/w40/Cubuk_Randaugment_Practical_Automated_Data_Augmentation_With_a_Reduced_Search_Space_CVPRW_2020_paper.html)&nbsp;&nbsp;&nbsp;&nbsp;![preprocess] +> - [x] [![augmix](https://img.shields.io/badge/ICLR'20-AugMix-fdd7e6?style=for-the-badge)](https://github.com/google-research/augmix)&nbsp;&nbsp;&nbsp;&nbsp;![preprocess] +> - [x] [![deepaugment](https://img.shields.io/badge/ICCV'21-DeepAugment-fdd7e6?style=for-the-badge)](https://github.com/hendrycks/imagenet-r)&nbsp;&nbsp;&nbsp;&nbsp;![preprocess] +> - [x] [![pixmix](https://img.shields.io/badge/CVPR'22-PixMix-fdd7e6?style=for-the-badge)](https://openaccess.thecvf.com/content/CVPR2022/html/Hendrycks_PixMix_Dreamlike_Pictures_Comprehensively_Improve_Safety_Measures_CVPR_2022_paper.html)&nbsp;&nbsp;&nbsp;&nbsp;![preprocess] +> - [x] [![regmixup](https://img.shields.io/badge/ICLR'23-RegMixup-fdd7e6?style=for-the-badge)](https://github.com/FrancescoPinto/RegMixup)&nbsp;&nbsp;&nbsp;&nbsp;![preprocess] +
+ +--- +## Contributing +We appreciate all contributions to improve OpenOOD. +We sincerely welcome community users to participate in these projects. Please refer to [CONTRIBUTING.md](https://github.com/Jingkang50/OpenOOD/blob/main/CONTRIBUTING.md) for the contributing guideline. + +## Contributors + + + + + +## Citation +If you find our repository useful for your research, please consider citing our paper: +```bibtex +# v1.5 report +@article{zhang2023openood, + title={OpenOOD v1.5: Enhanced Benchmark for Out-of-Distribution Detection}, + author={Zhang, Jingyang and Yang, Jingkang and Wang, Pengyun and Wang, Haoqi and Lin, Yueqian and Zhang, Haoran and Sun, Yiyou and Du, Xuefeng and Zhou, Kaiyang and Zhang, Wayne and Li, Yixuan and Liu, Ziwei and Chen, Yiran and Li, Hai}, + journal={arXiv preprint arXiv:2306.09301}, + year={2023} +} + +# v1.0 report +@article{yang2022openood, + author = {Yang, Jingkang and Wang, Pengyun and Zou, Dejian and Zhou, Zitang and Ding, Kunyuan and Peng, Wenxuan and Wang, Haoqi and Chen, Guangyao and Li, Bo and Sun, Yiyou and Du, Xuefeng and Zhou, Kaiyang and Zhang, Wayne and Hendrycks, Dan and Li, Yixuan and Liu, Ziwei}, + title = {OpenOOD: Benchmarking Generalized Out-of-Distribution Detection}, + year = {2022} +} + +@article{yang2022fsood, + title = {Full-Spectrum Out-of-Distribution Detection}, + author = {Yang, Jingkang and Zhou, Kaiyang and Liu, Ziwei}, + journal={arXiv preprint arXiv:2204.05306}, + year = {2022} +} + +@article{yang2021oodsurvey, + title={Generalized Out-of-Distribution Detection: A Survey}, + author={Yang, Jingkang and Zhou, Kaiyang and Li, Yixuan and Liu, Ziwei}, + journal={arXiv preprint arXiv:2110.11334}, + year={2021} +} + +@inproceedings{bitterwolf2023ninco, + title={In or Out? Fixing ImageNet Out-of-Distribution Detection Evaluation}, + author={Julian Bitterwolf and Maximilian Mueller and Matthias Hein}, + booktitle={ICML}, + year={2023}, + url={https://proceedings.mlr.press/v202/bitterwolf23a.html} +} +``` + + + + + + + +[density]: https://img.shields.io/badge/Density-d0e9ff?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABmJLR0QA/wD/AP+gvaeTAAACuElEQVRoge2Zu2tUQRSHv9UomO0SFKNBrJQkFnY2Ij6iJig+EMRKbIUk+B9YWxn/gdSCIIIkgmiChY3BwkZN8IVoIgQECw0KSSxmxj1z9e7eOTObXeR+cGEm95wz58e8zt1AScm68AR4a58DLc4liI5Mfxew27a3rHMuUWxodQKpKIW0G6WQdqMU0m5k75GUdAJHgb3AGjAHTAPLTRzzD+/toGvAYWWMCjAGLIlY7lkCRq1NU4kVUgEm+FtA9pmgyWJihdzCT/gbMAU8sG357maCfHOJETKEn+hjoEe877F/kzYnIvPNRSukM+P7DKj+w64KzAq7dzSpONUKGRV+P4GBOrb7rI2zv6rKtAEaIZuAD8JvvIDPOP6sJL8GNEIuCJ9fmG+aRvTiz8rZ4EwzpLjZL4v2HeBjAZ9PwF3Rv5IgD4/QGenGzILzOR4w1kn8fdUVlGmG2BkZxuwRgAXM8VqUR8AX296MOb7VxAo5Jdr3gdUA3xVgMidWMDFCNuIvpck8wzpInyESnl4he+SQsP2BuRRDqWKqYRfnoCIGEDcjw6I9gxETynfMb2kO9fKKESLrpKmIOHJ5Jau9ii6tbsxmdbZ7IsbsE3FWgK2aINoZGRS+n4F5ZRyAV5gL0uVzRBMkRojjoTKGZFq0j2kCpBAScgnmIWMM5loFUGSPDAibVfyPJy07bCwXtz80gGZGzov2LLCoiJFlAXgu+udCA2iEyJL7nsI/DxkruqxvtLR24i+BvtgBBf34S7Y3xDl0Ri5R+xlnHnN0puIltWO8AlwMcQ4R0gGMiP7tkIEKImOOYApTFfWW1jXxbhnYrh2kDtswNZsbZ6yoY72y+TS10qMLuC7evQbOhOVYmDlgv23fwFTVX21/EfPd0xA5I+34zOQl/t/+W+Ep8KYViRTkRasTKCkpKUnDb6XM8jMAxEX4AAAAAElFTkSuQmCC + +[reconstruction]: 
https://img.shields.io/badge/Reconstruction-c2e2de?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABmJLR0QA/wD/AP+gvaeTAAADj0lEQVRoge3aS2tdVRQH8F80bS5Y2yYx1qEFrYpSMxBHKjrRFEV8oFB1Wuqotv0A+h3EfBQpitLGRGhsrQ9Ek0adVIUKWvARoxWvg7U35yTcpLnnnnvuLfQPhwX7+V/7sfbaax9uYLgw0kAf83gYf+IyvsfXWMBHuNIAh1owj/Ym3794D69hbFAEu8EOjOM+PIc3cRprCqV+xEm06uhwq9Hb+NWBvTiC86V2v8WhXhtuWpEyZvBZqf1ZNc1OGVM4q7+KwM14A6upj3PYV1fjd2IlNfyd/iqScRDLqZ8V7O+1wduwlBr8RIxOE4oQxmFBMYB3VG2oJaY2T/GtKb0pReAWxZI+p+KeeUcxtVOl9CYVgUnFMpvttvLT+E/Y+ekNeU0rQuyZbABmtltpp2Jzn+iQPwhF4LhihWxriZ1IFb7CaIf8QSkyis9T38evVXgHfkqFNztd54WzNwgcEtx+cA3f7HAq+EUDpKpgBJ8Kjq9sVfCDVOhIA6Sq4qjgeGqzArvxD66Kw2hYMY6/Bc89nQq8KDQ90yCpqjgjuD6bE24qZT6U5OkmGVVEHuxHckJZkYNJftkYnerIxujenFBW5O4kv2mMTnVcTPJATigrsjfJXxqjUx2Xk5zolJnvz9dDEGBMcF3LCeUZyaGhQbgfPaOsyB9J7hoEkS6xO8nfckJZkbw3arsj9xG3J/lrTigrspLkAcOPe5LM1mudItnsPtgYnerIZ95STigrspDk443RqY4nkux4nRgXsdg1xZkyjJhQOI1506+bkSv4UNjolxql1h1eFtfx95Ws1ka8Ks6R8w2R6hYjuCA4Ht6q4Ji4RrbxVP95dY1nBLdLtuGBnEyFL4hY7LBgVHi9bRzbToWWCO23RUB5WJAHeFkX/mCOVqwqbPYgMY2/BKcnu608qxiByXp5dYUpRcDw7SoNlIPYZ0VAuWnswmLisKiHK8aU9c8KU1sXrxUT+FjxHNezM7tfMbXLmvHFpkt9XhSPTbVgn2KZrYrYa6e4cK8YFdYpb+xFhcteG1oKA9AWAeWeX18TRsRTRj4n8sbu67V7RjHtbRGLPapadHICryvcjryUujaxVdESyyu7M23hkc7hLTyP+4XZ3pm+STyAF1KZuVQn178kTuyBBD/GRFT8lLgCbPeNPn9X8a5wAHtSoM6favbgMTwqftW4S5jr/Ij6O34WpnRJvLPM2cIVv4HrGf8Dfs0JOaMPQmgAAAAASUVORK5CYII= + +[classification]: https://img.shields.io/badge/Classification-fdd7e6?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABmJLR0QA/wD/AP+gvaeTAAAD3klEQVRogcWZu09UQRTGfwILhTw0cQFRG0tQEwsrG4ydiQExFsQEY6h4KJLYU6M22qGxImpjYvwHLBQBDSq6KqBg1MSOGOWlaHAtztnMZbl79965j/2SCRvuzPnm3Jlz5ptzwaAKuABMAivaJoB+oJLoECvPHmAayBZoL4GmsCRx81Q5jL8D2oAabe3AjD57Qbg3FjvPRTXwHqhzeV7nIOmzIUiK55kObvPoc0r7TNgQJMWzrINrPPrUap8lG4IkeMp0YDFsc/S3xZ8APIFRhuxZgOMe/XLPMrZEyP73y/PWhqAfWZUZ3INwBzCnfbpsCBTdAXh6bQgqkfydBWaRgKvV1uEwvg402hAoUsCYD54p7WuFJowzbm1d/2aAelsSoAF44sEzBewOYR+QlekDxpEMs4qkwS5kJTJE48w+tbOhHMvAU2Q7Wa9EEKSBN5h9bvvmLquNBxHNywppzMrMYufMax3fHuG8rFDPZmeCiLzDOm4R0V4lRwMi/HLayZnNUkAncBdYQOJgDZhHzocscCPJyRZDA+KE05l24AuFs1KuLQAnk59yYTRinFnETDSDqN1mYLu2FmAAkzD+AcOEkCVRoxHjxG8kjXppsjKgR/tmgStxT9Av2jFOtAYY14pxxkvaJ4IUJiZ6LMb36thPRFsXCIxOTEzYSPxyTCo/E2YiYe4XYDLPLSR4g2IDuK2/T4ecSygsIG+zOYSNFkxKdkMKuQKMIUklljLVik6iOoSNGrWx6vIsjSjiuMtUrFH8Hl4MOUd+5f2/AnhF/GUqIJqtdQBzQE4DI8B5YIhkylQA3FNDAyFsDOItZ+IuUwGSabKI7LBNvzkheQ44ClwC7mAcibtMBcjenMf+QMwVPj6z9Xa4RHFH6ojIkQrgPnYS5RimFuCmhCcpfgHr0D7jAXi3IA08wgSqUzSWe4wrR1Yi58RwgX5BylRW5SOAI8BXNfIN2dtDGIcyyF5vQc6YaiQ7DWJiIgtcpbCM91umsi4fdWNU62M23w5PKGmxi5Xf4kOxMpVr+cjtS9IkssRV2kYcRq7j/iZSyNsbRc6AZbX1Abn+9gF/ka2134cz+WUqz/KRny9JueBbBc76mIAXbqqt0ZB2NsHvl6QskmoPRcC5F5EkG8DBCOwBwb4kWWcHF1xTmw+jMhjkS9JYVKTALuAnJmhXCSnRg3xJ+h7UuAfSSAKITKInKgUUFcBztfkRORdqCSnRE5MCDvSovTnkhM6HlURPRArkYVztdXj0CSzRY5cCLvihNnd69LGS6FZSIARijctAUiAkShGXsaAUcRkLShGXsSHpuIwVkcXlf7gl3GNHJu+DAAAAAElFTkSuQmCC + +[distance]: 
https://img.shields.io/badge/Distance-f4d5b3?style=for-the-badge&&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABmJLR0QA/wD/AP+gvaeTAAAB3ElEQVRoge2aO07DQBRFDzT5LABFNLRIFIiSgoYC0SGxAgoaPoVXQ4OggR2AxH8XSBErgB4huoTCfnjk2HHi+fiNlNMkhT2ak+vnGzmBBc7ZAo6ApbY3YsMO8A2MgUtgud3tNGMX+CGVGGWvF0SWjJnENbAP/BJZMkUJ2fQeEcmYl9MNk5s1k1F7mZlJjIEPYLXkONXJmEncAsPs/RAYlByvMhlT4or0Ex6Qy5xXnKdKZtpMDICzmvNVyMw6E3W0OjPzzkQdrSRT1hMrwDsRJVM3E7bJHJBfqodWO52CKVG1WRuZLvCQnfsJrFnut5R5ZmKWW2+RLvBILrFuv+VJmvTELLdeIbhEk56oI4iEq56oogPcZWt/ARsO1/7HdU8UCZ6Ey54QOsA9AZPw0RNd4AkFM2GTTPAkfPREjwBJ+O6J4BI+eqIHPKNgJmxQNxNNCJ6Ej57oAS9E3hNqZsImmeBJ+OoJkYi6J7xLbDIpIbjqCe8SkD6ZkJk4drx2MAkhIf2hZQScOFozyGCX4VKmNQnBlDltuEbrEoKNjBoJoYlMH2USwjwyfeAVhRKCKVPVJ+olhGky0UgIZTLRSQgJ+T8UEuCNgI3tGklGvtJElUQRkYlaQtjG7UOIBQB/hf9HJ+Iv7O8AAAAASUVORK5CYII= + + + + +[preprocess]: https://img.shields.io/badge/PreProcess-f4d5b3?style=social&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAABDUlEQVQ4jc3TPy8EURQF8N8uS/wJGxuh0tH7CBKthk/gk6iIQiFRSEhEFEQhGoSQbERUEo2SGp1CwTa7infJZE3sbuckr5j75p5z7pk7/BesoIZGm6eG5SxBDSMdCFbwmS002mi6xU1zT7ED1fpfQtmLAexhtAVhI++hGyd4wD3KUS/jUJr9G8P8HmETBUzjGqdSuMeYwno4PMMjZrMOlnCHwagVsI23UC9iHNWoz+AlS/CEsSZHXTjABvpwiZ0YdSsc/hBMykcJEziXwi0FSTXGQVqkSl43ekNpHz1BcoV+YQXW8BwvZLGKVymPRexKoc7hQ1y0whHepXzqWJBZ41abWJA+3xAuMK/pH/gCPJhBnIabIDQAAAAASUVORK5CYII= + +[training]: https://img.shields.io/badge/Training-f4d5b3?style=social&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAABmJLR0QA/wD/AP+gvaeTAAAH5UlEQVR4nO2dW2wVRRjHf6WVwgNVlKCERJDihUiQxhvGFjCgFEXig8QgiSHGywOCD14wmpgYiakEEyMGNUEgImBQ4+VBJdqICMGgKAhYvIBGgaIhEAVLKbTHhzkbjqczszOzs2d3YX/JvJye/c+337dnZvabSyEnJycnJycnJycd1AILgQNAwbIcKF7bt+JWn0EsxN7x5aWl4lafQbQTPQDtFbfakaqkDZBQ8KSTxnvrRZ+kDTjbyQOQMDUxas8EpgCHgSXALxH1VE1K1CarHngIGAh8Cqz2oJkoNZy+iaD8CzQbXFuPumNVofr+KIP6JgHHyq5bTbwPZqzUAG8hd8hx5EEYDMwFNiuucw1AAfgWeAQYKrluEtChuG4NUG10xylC53xZEIYjmqbOkGsKwClNvacMru8CliJ+YaB3fukvITNBMHF+aRDeRjjFdFz/nabu7RY6J4F3CHd+poJg43yX0onozFU0AydirH8VKQ5CnM5vAxYDYwzsGFP87q6YbHmTFAbBxPnfY9bGB2ULMAcYEsGuC4EHgC+BHsN6O4u2ZiYIJs7fDNQBU9EHoRtYC4yNwc7LgWXo+5vOoo11hI/EVpKCIFTRe5yvcn6AKgitwJUVsHk48K6k/sD5ASZBWFEBe7XMxM75ATcCWxHNwjbgrkoYW0YzsBE4AmwCxkm+YxKEGZUwVsUKhVFBmy9zftaoQ98nLPNVkUsy7rDmb5cBTY62pIkmxL2oOFQpQ2SMROR2dJ3atMSsi8409IOGo8AliVlXZAr6t8kTwITErHOnEf1LXQcijZEKJqP/JWxLzjRnviIjzg+4id7p3NLxfSamBUv4hww5P2A5cqN3JGmUIxuR38vaJI3SMQR1XzDLg35fYCLwHKJ5+IvTv64/i5+9BIzHz/RqM/J76UK8zKWORcgNXk+05qc/8DB2i7N+B54E+kWoF+A9hf7LEXW9MxB5+98DXBVB91bcVsUFZQ9we4T66xHzBrJ+4IIIut65H7kD3nfUqwLmI5oXV+eXlhbcf4WqXNeDjnqx0IrcSFl+JYwqxOu9D8eXltdx6xsaFHqfO2jFwhDkc7G7HPWelWj5Ks842iSb6uxGPsEfCy6rlB93qGe6ge5HwGxECmBA0bZRxc/WoZ946cZsSUw5TxjYFZRYVmW7rFIeaVlHP0SnqdJrA6410GkEdmp09mE/Ohql0VMVr6uybUcifzjU8ZRG7zPE025KHep+qQDMs7StCjio0ZOV/ZZ1aLGN/nJL/T6IsbtMawdu8woDgN0Kzf3AOZZ6qxRauuINm0pPAtdb6k9QaPUgZs9cGaexc7ylVhNmC78SC8ApxNM63UF/sULzk6iGFzVk2gsctO5EjO5M30+8EXcFqtTvPR6071Vob/KgHZD5AASJtfJyhQftkQrtgx60AzIfAFXb2t+D9gCFdpcH7QAv/klyh4zKWB9r81WrqVO3IyhJg44oPh/sQVuVu090NYOMJAOwR/G5j2UtqiGxqs7EcAlA+TB0O265FlVm0ceKubsVn29x0LoDMQwt77Nix+bl4wRmy8hLUb2IFYDrItjdhDo5Z/uCdw3yyZlE3wNUxXbKri/qfNNu7PJAAecCPyg0f8V+guZVhVZFAmCbjGtzqGOeRq8Vu3xQWDJuvoN9ezV6suI1Gfe8ZeUF7DdW9EMYrdLbiUg1h3ED8KNG52fEHIINIzR6quI1HV2LCILOQeVljkM9zejzLD2ISZfZiBx9LeJpvxSRclgXYlMPYqLflsdCdMuf/BYqeExOPfKOzmWUAWLa0PZpMy1PO9okW57eQ4rWB6k2MTQ4aPVBTKD7dv5SpztTp7TXO+rFgqoDjbKMbz7mG+vCmp0W3PdzfajQvc9RLxYGIzZdlxvZDYyOoHsbotN0df4+4JYI9TcgfwiOAedF0I2FJcid0BpRtxbxa/hNoa9y/FyiLU2sAjYo9BdF0I2N4ai3gPpIKVQhhqEvIiZT2j
k9YmovfrYAMdVoO98rYxbye+kALvKgHwtrkRu9PUmjHGlDfi/eNuX5ZgryfqCASF5laYNGDeoBwDFSuOXqZvT7xLYmZ5ozupNXUhWEyeid30m0pSVJ0Yh+h2QqgjAC9b6wwPlTlVenn7CzLf4GhiVmHfCCxKgzxfkBYUFY6Ksilxkx3S6Rn/C79iYpNiHuRcWgShkiw/WwjkbE3uECYopvFpUfJc1AHH/WDXxDRg/rqCJ80arpcTVfEM85QeWMRj5Z08H/0xaZOK4GxJh5DWZBCGtPe4APcNvaFEYD4pA+3XxDcJJjZg5sCqgm/OAm2yPLdgCPIkZargxF5IS+tqj3OOFHlqXK+QEmQXAte4HXgKsN7BiLWBSg2hsQtaTS+QHVuG1kMC0n0S9/vxW7M0jPKOcH2AShA3ECoW3TpCKs6SgtXYjkoSp/lUnnB1QjHBvm/ODUkaGINLPuyJugRD26uBMxdxHM54blsTLn/IBqhOFhzi/lfMQO9A3oRysqdE7cjOiQZQt+J6EOfiadH1BN74P9jmJ23s7F+AtAveaagIn0PiNoBRV0fpxvojMQ7wCHgFcQSwNNUDnb9h84mN7bMMR6pkHAx4iDxStGGidMdE+7DWm8t16kbsfI2UYegIRJYwB8/BM2l2MTEiGNAVjpQeMNDxpnLS6rsoOyH3HAX/7PPHNycnJycnJycnJycnJycnJyUsd/Xk5Gaglg9FgAAAAASUVORK5CYII= + + +[extradata]: https://img.shields.io/badge/ExtraData-f4d5b3?style=social&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAABmJLR0QA/wD/AP+gvaeTAAAGYElEQVR4nO2d228VRRzHP6fl2ipFvECN1gpoQR6MVEXFN28hkQdS0FdFRf0P8E8QjXh5ML6aiAFvKD6p9U2lGkuJxEtiI1ClRWy5lYrc6sNvT/acPbN7dk9nd6dnf59ksifl7G9+85uzM7PznWFKuEcL0A3cCtwE3OJdrweu8dISoN37/kJggff5PPCv9/kcMAGc9K7/AH8CR4AR4A/gMDCdamkSUso5/4XAWuBe77raS20Z5T8F/AL8DBwAvgcG8Ss1c7KukPnAeuAR4GHgTmBuxj7U4yIwBPQDXwDfABdy9cgy7cCTwIfAJNJEzKY06fn+BBk8uWk+IeuBF4FNJC/IaeA3/Pb+CDAGjCP9wQRwxvtuZb9R2Z904Pc31wLLkP7oZu+6CliU0K8p4GPgbeDbhPfmwhzgKeAg8X+Bx4BPgO3AY0BXhv52eXlu93w4lsDvg0hZ52Tob2xagWeAYeoXZBzY7X2/Ow9n69ANPAvsQXytV55hYCsSAyfoBQaoXwnvAhtxrxOPogV4EHgDGCW6jEPAA/m4KSwA3gKuYHbwCvAV0Iejj3VC5gKbkdFXWJkvA28io8lMuQ0Zr5ucugS8B6zJ2qkMWYOU8RLmGAwCK7Nyphc4EeLIZ8DtWTniAD3APsyx+Bt52U2V+4BThsxHgMfTztxhNiIxCMblFBKzVLgR+MuQ6afIOL/odCAjs2B8xpB3H6u0IvM8wcxeI//5MJcoATupjdMAlofFLxgyedlmBk3GDmrj9bwt421IBxVsplpsZdCEtCADnMqYHUemdmbMloDhM0CnDcNNzlJkTq4ydn02DO8OGN1hw2hBeIXq2L1vw+ihgNF7bBgtCOuojt1PNowGH7urbRgtCIuobe4jidMxBzUDHebGJxjfuj/mRkZKPQ3cU1QSx6qRCtnSwD1FZXMaRoMvOGeRaRQlmmXU9r/TNgybZjL3oS+GUbQQPgs8Y8IUMn0fCcc0dZJ6hUwjiqE+KT4l4FWiYzZjooxPI4vJltrIaJZzHeHNVKoVYuqoRhCRpqiECVSZdOprEdElrLNfZSPTWcJq4HPMsRgF7jL8fcaYDK4gfJHDZWAXzb/IYRdSVlMMfgSWe9/NpEJAlrq8TvQyoH7kRXI2rcUKYx6yvvdrosu8k+plQJlVSJm7gf0hDpbTBLNzoVwr/kK540SXcRDzQrnMK6Ts+FbiLSWdAD4AnsN/rF1iObANWfF+kvrlGQaeJnz4n0uFlCkvth4y3BeWRoG9wEvABmRtbRYzyiUvrw1e3nsJH6yY0hDxFlsnqpA4BQ8aiRus+5HFEX3428/iMom/HeGodx3F36I2jqx5AtlMc8773I609eBvRVjife5EKqC8HaEHuKoBvz4C3gG+i3lPo/GLNDiTR64NmfXcg0xMxv0FupLOIjJ2H40tUnDmCTExD3lyHgUeQsbp8yLvyJ4LSAfdD3yJbMy5OAN7ieKXdYUEWYBUyjrvegfyYpm0KWmUSeBX/E2fA0hl/Gcxj1lVISZKyM6mbu/ahWyLvoHqbdFlOXQ+/pa5KfxgnqV6W/QJZFv0US8dRvqmtEm9D0m6L6/ILCZhH6Kaerqopu4Yqqk7hGrqDqGaumOopu4Iqqk7hGrqDqGauiOopu4Iqqk7gGrqDqCaugOopo5q6lVO1SNoRDV11dRzTaqp54xq6qimHu8fGzFoAdXUYxisTKqpx0c1dcdQTd0xVFN3CNXUHUI1dcdQTd0RVFN3CNXUHUI1dUdQTd0RVFN3ANXUHUA1dQdQTR3V1KucqkfQiGrqqqnnmlRTzxnnNfXTVKuEHcT4L7Nj0uyaegd+0woSy8VRN8Q5zm6E6pe8HuCHxK6ZOY+0xZXtcTNp6sFZi5F6N8SpkENUV8gW7FWIiWkkUFksQEiboGJo5XSEzdR2cqoY1sekGG6yYXghtW+pqhhGY1IMrR15BHKgVXD4pophOCbFcJvNDFoxH0C8Ez1PpJISMuEajNN+UjhNuhPzwZKqGAphiuEYMkpMhaijV1UxrI1LqkevlllL7bmGlZ29KoaSMjmcuMxKRBUzOaKKocRmRdZOzUcOcw9zqoiKYW4H3FfSi3kEVpmKoBgewKwY5kKRFcPfEcXQ+rDWBqoYWnAqLVQxdJSiK4aJyHrao4iKYSLynodqdsUwMXlXiIlmUgwVRbHK/zvve/uixi6eAAAAAElFTkSuQmCC + + + +[postprocess]: 
https://img.shields.io/badge/PostProcess-f4d5b3?style=social&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAABmJLR0QA/wD/AP+gvaeTAAAFXElEQVR4nO2dW2gdRRjHf7bRIiKiVNSK2IqkCV4ajdfUqEWrSOKt1KpVUi+NN0SliD5oVfClCj4Vi/oo6oNI38UHxUbEB29QwUtVvEWbqmlMTc31+DCNrbDz7ew5szu7O98Pljxk58+3//85s7O7s3NAURRFURRFURRFURRFUVpnAbAGeAP4FpgAGgG3F4HDcj3iEnEh8BlhDY82hJuA/YQ3O8oQVgL/EN7ktO0VTBdZK44AdhHe3GhDuJ/wpkbdHb1H8kH+AQwAxxZYy3JLLbUNoQ2YJvkA+wPUkyWAWoRwMskHtpcwB5Y1gMqfE2wH/GXJ6ildCJVNvEmGgc+F/w8CWynwWxtbAOPAKuBjYZ8HgJcpyJvYAgAYBVYjhzAIvEQB/sQYAJQohFgDgJKEEHMAUIIQYg8AAoegARiChdDmU6wCzF+gNcvggb/3AXOtl6PfgGYYBF7wJaYBNMc1voQ0gMBoAIHRAAJT1wC+wtzRbHXryLvQugZQGTSAwGgAgdEAAqMBBCa2e0FZmR9N5YZ+AwKjAQRGAwiMBhAYDSAwGoAfjgc2Ax8Bu4ExYCdmlt1ZRRRQtrmhRXInxnDbfNNZYBvmBZbciDWAZ3Cf+PsucFRehcQYwNNkn309BBydRzGxBfAozU1/zy2EmAJ4jObNP7Q78npOiCWATcjG7gJWAIuB7Sn7bvNZWAwBPISZjGUz9Adg6SH7LwReF/afBbp8FVf3ADYim/8jsCyhXVoIW30VWOcABkk3/zShfRv27minryLrGsBdmK7CZv7PwOkOOost7cd8FVrHAAaQzR/GHLcLZ1g09vgqtm4BrAdmsJv/G9DpqHUi8IVF50NfBdcpgHXI5o8AZzpqLcF4YNN6ylfRdQlgDTCF3bBRoNtR6xTga0FrHDjBV+F1COB6ZPP/BM5x1DoVs0ybTavBwZc9vFD1APqBSeRP/nmOWsuA7wWtBvCcx9qBagdwFfISa2OYNfBcWAp8J2g1gOc91v4fVQ1gNbL5+4BLHbXagZ8ErQawxWPt/6OKAfRiDLaZ9TdwuaPWcuAXQStX8+cLqFIAKzGjEMn8VY5aHZiLMsn8zR5rT6RKAfQAf2E3awK4wlGrE/hV0GoAT3is3UpVArgI2fxJoM9RqwtzK8GmNQc84rF2kSoEcC5mLC+Zf20Grd8FrTnM84PCKHsAXZjVG22GTQHXOWp1p2jNAQ96rN2JMgewAvnTOgPc7KjVgzwHaA6z4lbhlDWAs5H76RngVketS5DPH7PAHR5rz8QSS1FjhFuPswN5hDID3Oao1Yts/gywwWPtmSnbwq2dmHv2kmG3O2pdibkusGlNA7d4rL1ppKWLNwDHFVRHO/KFUZau4mrkH56YAtZ6rL0l7sVeqI/NhWMwz2mlk+Tdjlp9yEvxTwI3OmoVwuHAN4QNoE9on2WE0od8k24SuMFRq1B6yO8HHFzYYmmbZWzen3IMk7hfMwRhLfn8hIkLQ5a2zzq2X4f8VGwC8/yg9JwPfEKxARyJ/ZPrMoNhPfaRXAMzEnK9SVcKFmD6ydcw54ZWf8Yqjcss7UZIvxYZQJ4JMX5AXxF4kmTztqe0S5v9tg/3ZwNR8zbJBm4S2mxENn8vcHF+JdeHhdhvkl2QsH8HZlKUNOl21NJWSaAbe/exCDM38x7gVcxs5rTzjZqfkYdJNnI/8k20pG0PHl+ciIW38DPUHcHcwlYykjYrwWXbTUFvtdeNdlo3f5gClqhMog4rZvU20WYas1T9ELADeB8z5CycWAKYAD7FGP4BxnBvrwfFTtI08DHgHeBxzLPcRcGqqzkncbAPfxMzHO2mQsvw/Atv0E+fkVDOBwAAAABJRU5ErkJggg== diff --git a/OpenOOD/bash_allocation.slurm b/OpenOOD/bash_allocation.slurm new file mode 100644 index 0000000000000000000000000000000000000000..ad5e8f33387306f5ef91cfa71eba054ed7b609db --- /dev/null +++ b/OpenOOD/bash_allocation.slurm @@ -0,0 +1,16 @@ +#!/bin/bash +#SBATCH --job-name=zzzz2 +#SBATCH --output=output2.txt +#SBATCH --error=error2.txt +#SBATCH --cpus-per-task=5 +#SBATCH --ntasks=4 +#SBATCH --gres=gpu:1 +#SBATCH --mem=100000 +#SBATCH -N 1 + + +./batch_file_deal3_train_method.sh + + +# 取消当前作业以释放节点 +scancel $SLURM_JOB_ID \ No newline at end of file diff --git a/OpenOOD/bash_allocation2.slurm b/OpenOOD/bash_allocation2.slurm new file mode 100644 index 0000000000000000000000000000000000000000..a390c848e9ffae0a2200e8299c8db24cc3349ad7 --- /dev/null +++ b/OpenOOD/bash_allocation2.slurm @@ -0,0 +1,16 @@ +#!/bin/bash +#SBATCH --job-name=OOD_post_method +#SBATCH --output=output_OOD_post_method.txt +#SBATCH --error=error_OOD_post_method.txt +#SBATCH --cpus-per-task=5 +#SBATCH --ntasks=4 +#SBATCH --gres=gpu:1 +#SBATCH --mem=100000 +#SBATCH -N 1 + + +python batch_file_deal_post_method_Ours_Notline.py + + +# 取消当前作业以释放节点 +scancel $SLURM_JOB_ID \ No newline at end of file diff --git a/OpenOOD/batch_file_deal2.py b/OpenOOD/batch_file_deal2.py new file mode 100644 index 0000000000000000000000000000000000000000..5563e61cd66cd094d472856f407161d971806280 --- /dev/null +++ b/OpenOOD/batch_file_deal2.py @@ -0,0 +1,120 @@ +import subprocess +import os + +# 设置 PYTHONPATH 环境变量 +pythonpath = '.' 
+if 'PYTHONPATH' in os.environ: + pythonpath += ':' + os.environ['PYTHONPATH'] +os.environ['PYTHONPATH'] = pythonpath + +ROOT = "/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD" + + +run_file = ROOT+"/eval_ood.py" + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=gram",\ + "--batch-size=20",\ + "--save-score",\ + "--save-csv",\ + ]) + +# subprocess.run(["python", run_file, "--id-data=bronze2", \ +# "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ +# "--postprocessor=gradnorm",\ +# "--batch-size=20",\ +# "--save-score",\ +# "--save-csv",\ +# ]) + + + +# subprocess.run(["python", run_file, "--id-data=bronze2", \ +# "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ +# "--postprocessor=react",\ +# "--batch-size=100",\ +# "--save-score",\ +# "--save-csv",\ +# ]) + +# subprocess.run(["python", run_file, "--id-data=bronze2", \ +# "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ +# "--postprocessor=mls",\ +# "--batch-size=100",\ +# "--save-score",\ +# "--save-csv",\ +# ]) + +# subprocess.run(["python", run_file, "--id-data=bronze2", \ +# "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ +# "--postprocessor=klm",\ +# "--batch-size=100",\ +# "--save-score",\ +# "--save-csv",\ +# ]) + +# subprocess.run(["python", run_file, "--id-data=bronze2", \ +# "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ +# "--postprocessor=vim",\ +# "--batch-size=100",\ +# "--save-score",\ +# "--save-csv",\ +# ]) + +# subprocess.run(["python", run_file, "--id-data=bronze2", \ +# "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ +# "--postprocessor=knn",\ +# "--batch-size=100",\ +# "--save-score",\ +# "--save-csv",\ +# ]) + +# subprocess.run(["python", run_file, "--id-data=bronze2", \ +# "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ +# "--postprocessor=dice",\ +# "--batch-size=100",\ +# "--save-score",\ +# "--save-csv",\ +# ]) + + + + + + +# run_file = ROOT+"/main.py" +# subprocess.run(["python", run_file, "--config configs/datasets/cifar10/cifar10.yml \ +# configs/datasets/cifar10/cifar10_ood.yml \ +# configs/networks/resnet18_32x32.yml \ +# configs/pipelines/test/test_ood.yml \ +# configs/preprocessors/base_preprocessor.yml \ +# configs/postprocessors/knn.yml ", "--num_workers=8", +# "--network.checkpoint='results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt'", +# "--mark=0"]) + + +# cmd = "python main.py \ +# --config configs/datasets/cifar10/cifar10.yml \ +# configs/datasets/cifar10/cifar10_ood.yml \ +# configs/networks/resnet18_32x32.yml \ +# configs/pipelines/test/test_ood.yml \ +# configs/preprocessors/base_preprocessor.yml \ +# configs/postprocessors/knn.yml \ +# --num_workers 8 \ +# --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ +# --mark 0" + +# subprocess.run(cmd, shell=True, cwd=ROOT) + +# # subprocess.run(["python", run_file, "--model_type=test"]) +# # subprocess.run(["python", run_file, "--model_type=stage1"]) +# # subprocess.run(["python", run_file, "--model_type=stage2"]) +# # 
subprocess.run(["python", run_file, "--model_type=stage2_searching"]) + +# path = ROOT +# # cmd = 'python -m torch.distributed.launch --nproc_per_node 4 texture_countour_double_GCN.py --model_type=stage2' +# # subprocess.run(cmd, shell=True, cwd=path) +# cmd = 'python texture_countour_double_GCN.py --model_type=stage2_searching' +# subprocess.run(cmd, shell=True, cwd=path) \ No newline at end of file diff --git a/OpenOOD/batch_file_deal3_train_method.sh b/OpenOOD/batch_file_deal3_train_method.sh new file mode 100644 index 0000000000000000000000000000000000000000..b46f68f0b0053fbd2a08ab09e0c784bc79266e13 --- /dev/null +++ b/OpenOOD/batch_file_deal3_train_method.sh @@ -0,0 +1,32 @@ + +SEED=0 +# train +# python main.py \ +# --config configs/datasets/bronze2/bronze2.yml \ +# configs/networks/opengan.yml \ +# configs/pipelines/train/train_opengan.yml \ +# configs/preprocessors/base_preprocessor.yml \ +# configs/postprocessors/opengan.yml \ +# --dataset.feat_root ./results/bronze2_OursBronze2_feat_extract_opengan_default/s${SEED} \ +# --network.backbone.pretrained True \ +# --network.backbone.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ +# --optimizer.num_epochs 90 \ +# --seed ${SEED} \ +# --proj_ROOT /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD + +# test +SCHEME="ood" # "ood" or "fsood" +python main.py \ + --config configs/datasets/bronze2/bronze2.yml \ + configs/datasets/bronze2/bronze2_ood.yml \ + configs/networks/opengan.yml \ + configs/pipelines/test/test_opengan.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/opengan.yml \ + --num_workers 8 \ + --network.backbone.name OursBronze2 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/bronze2_ours_resnet50_415_NotLine_train/s0/model_state_dict_epoch90.pth \ + --evaluator.ood_scheme ${SCHEME} \ + --seed ${SEED} \ + --proj_ROOT /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD diff --git a/OpenOOD/batch_file_deal_post_method_Ours_Notline.py b/OpenOOD/batch_file_deal_post_method_Ours_Notline.py new file mode 100644 index 0000000000000000000000000000000000000000..45de8708212a6f25fbf078cd5e989f223d3256b3 --- /dev/null +++ b/OpenOOD/batch_file_deal_post_method_Ours_Notline.py @@ -0,0 +1,128 @@ +import subprocess +import os + +# 设置 PYTHONPATH 环境变量 +pythonpath = '.' 
+if 'PYTHONPATH' in os.environ: + pythonpath += ':' + os.environ['PYTHONPATH'] +os.environ['PYTHONPATH'] = pythonpath + +ROOT = "/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD" + + +run_file = ROOT+"/eval_ood.py" + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=openmax",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=msp",\ + "--batch-size=200",\ + "--save-score",\ + "--save-csv",\ + ]) + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=odin",\ + "--batch-size=10",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=mds",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=ebo",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=gram",\ + "--batch-size=20",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=gradnorm",\ + "--batch-size=20",\ + "--save-score",\ + "--save-csv",\ + ]) + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=react",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=mls",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=klm",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=vim",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=knn",\ + 
"--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=dice",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + + + diff --git a/OpenOOD/batch_file_deal_post_method_p2pNet.py b/OpenOOD/batch_file_deal_post_method_p2pNet.py new file mode 100644 index 0000000000000000000000000000000000000000..8afbff316d0b408761f63123217efb4ed2f98c75 --- /dev/null +++ b/OpenOOD/batch_file_deal_post_method_p2pNet.py @@ -0,0 +1,128 @@ +import subprocess +import os + +# 设置 PYTHONPATH 环境变量 +pythonpath = '.' +if 'PYTHONPATH' in os.environ: + pythonpath += ':' + os.environ['PYTHONPATH'] +os.environ['PYTHONPATH'] = pythonpath + +ROOT = "/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD" + + +run_file = ROOT+"/eval_ood.py" + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=openmax",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=msp",\ + "--batch-size=200",\ + "--save-score",\ + "--save-csv",\ + ]) + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=odin",\ + "--batch-size=10",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=mds",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=ebo",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=gram",\ + "--batch-size=20",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=gradnorm",\ + "--batch-size=20",\ + "--save-score",\ + "--save-csv",\ + ]) + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=react",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=mls",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=klm",\ + "--batch-size=100",\ + "--save-score",\ + 
"--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=vim",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=knn",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_p2pnet_415", \ + "--postprocessor=dice",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + + + diff --git a/OpenOOD/batch_file_deal_vim_ablation.py b/OpenOOD/batch_file_deal_vim_ablation.py new file mode 100644 index 0000000000000000000000000000000000000000..617285d6c0c78280e11c55505d6a24da2c834029 --- /dev/null +++ b/OpenOOD/batch_file_deal_vim_ablation.py @@ -0,0 +1,26 @@ +import subprocess +import os + +# 设置 PYTHONPATH 环境变量 +pythonpath = '.' +if 'PYTHONPATH' in os.environ: + pythonpath += ':' + os.environ['PYTHONPATH'] +os.environ['PYTHONPATH'] = pythonpath + +ROOT = "/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD" + + +run_file = ROOT+"/eval_ood.py" + + + +subprocess.run(["python", run_file, "--id-data=bronze2", \ + "--root=/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train", \ + "--postprocessor=vim",\ + "--batch-size=100",\ + "--save-score",\ + "--save-csv",\ + ]) + + +# subprocess.r \ No newline at end of file diff --git a/OpenOOD/codespell_ignored.txt b/OpenOOD/codespell_ignored.txt new file mode 100644 index 0000000000000000000000000000000000000000..defc26379052b4f34721a4ff1bf2ada915e9858b --- /dev/null +++ b/OpenOOD/codespell_ignored.txt @@ -0,0 +1,5 @@ +ans +fpr +als +hist +tha diff --git a/OpenOOD/configs/datasets/aircraft/aircraft.yml b/OpenOOD/configs/datasets/aircraft/aircraft.yml new file mode 100644 index 0000000000000000000000000000000000000000..a2be9e29cf86ba2dfc8745aeca0d8220c4b75700 --- /dev/null +++ b/OpenOOD/configs/datasets/aircraft/aircraft.yml @@ -0,0 +1,33 @@ +dataset: + name: aircraft + num_classes: 50 + pre_size: 512 + image_size: 448 + + interpolation: bilinear + normalization_type: aircraft + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/aircraft/train_id.txt + batch_size: 32 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/aircraft/val_id.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/aircraft/test_id.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/aircraft/aircraft_oe.yml b/OpenOOD/configs/datasets/aircraft/aircraft_oe.yml new file mode 100644 index 0000000000000000000000000000000000000000..644e4a6917382bee87dcdbd94ee804bfb0665189 --- /dev/null +++ b/OpenOOD/configs/datasets/aircraft/aircraft_oe.yml @@ -0,0 +1,12 @@ +name: aircraft_oe + +dataset: + name: aircraft_oe + split_names: [train, oe, val, test] + oe: + dataset_class: ImglistDataset + data_dir: 
./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/aircraft/train_oe.txt + batch_size: 32 + shuffle: True + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/aircraft/aircraft_ood.yml b/OpenOOD/configs/datasets/aircraft/aircraft_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..5532b0b739f60c2cfdafc5a9a2fcf6e3651c9de7 --- /dev/null +++ b/OpenOOD/configs/datasets/aircraft/aircraft_ood.yml @@ -0,0 +1,28 @@ +ood_dataset: + name: aircraft_ood + num_classes: 50 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 64 + shuffle: False + + pre_size: 512 + image_size: 448 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + split_names: [val, nearood, farood] + val: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/aircraft/val_ood.txt + nearood: + datasets: [hardood] + hard: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/aircraft/test_ood_hard.txt + farood: + datasets: [easyood] + easy: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/aircraft/test_ood_easy.txt diff --git a/OpenOOD/configs/datasets/bronze2/bronze2.yml b/OpenOOD/configs/datasets/bronze2/bronze2.yml new file mode 100644 index 0000000000000000000000000000000000000000..0c7341d33ee417ba3345495439d09196e79f3ea9 --- /dev/null +++ b/OpenOOD/configs/datasets/bronze2/bronze2.yml @@ -0,0 +1,36 @@ +dataset: + name: bronze2 + num_classes: 11 + pre_size: 420 + image_size: 400 + + interpolation: bilinear + normalization_type: imagenet + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_train.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: True + val: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_val.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: False + test: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_test.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: False diff --git a/OpenOOD/configs/datasets/bronze2/bronze2_ood.yml b/OpenOOD/configs/datasets/bronze2/bronze2_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..9260bd73bd6548291010ebd5aaec4412319edba4 --- /dev/null +++ b/OpenOOD/configs/datasets/bronze2/bronze2_ood.yml @@ -0,0 +1,62 @@ +ood_dataset: + name: bronze2_ood + num_classes: 11 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 32 + shuffle: False + + pre_size: 256 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + split_names: [val, nearood, midood, farood] + val: + data_dir: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/images_largescale/ + imglist_pth: 
/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/val_openimage_o.txt + + nearood: + datasets: [imagenet21k_container, imagenet21k_container_refine, bronzeS_containerM, bronzeM_containerS, bronze_Line] + + imagenet21k_container: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet21k_container/imagenet21k_container_file-list.txt + imagenet21k_container_refine: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet21k_container_refine/imagenet21k_container_file-list-refine.txt + bronzeS_containerM: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/images_largescale/transfer_dataset/bronze_structure_container_material/bronze_structure_container_material_test.txt + bronzeM_containerS: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/images_largescale/transfer_dataset/container_structure_bronze_material/container_structure_bronze_material_test.txt + bronze_Line: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/images_largescale/bronze_line/bronze2_Line_OOD_list.txt + + midood: + datasets: [ssb_hard, ninco] + + ssb_hard: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_ssb_hard.txt + ninco: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_ninco.txt + + + farood: + datasets: [inaturalist, textures, openimageo] + + textures: + data_dir: ./data/images_classic + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_textures.txt + inaturalist: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_inaturalist.txt + openimageo: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_openimage_o.txt + diff --git a/OpenOOD/configs/datasets/cifar10/cifar10.yml b/OpenOOD/configs/datasets/cifar10/cifar10.yml new file mode 100644 index 0000000000000000000000000000000000000000..b4e7e624da8ef44259d78273056353eaa28eb229 --- /dev/null +++ b/OpenOOD/configs/datasets/cifar10/cifar10.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar10 + num_classes: 10 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/train_cifar10.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar10.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar10.txt + batch_size: 200 + shuffle: False diff 
--git a/OpenOOD/configs/datasets/cifar10/cifar10_double_label.yml b/OpenOOD/configs/datasets/cifar10/cifar10_double_label.yml new file mode 100644 index 0000000000000000000000000000000000000000..bc1e23cadb100de309cd6c1b03f54f6cc4c68ccc --- /dev/null +++ b/OpenOOD/configs/datasets/cifar10/cifar10_double_label.yml @@ -0,0 +1,32 @@ +dataset: + name: cifar10_double_label + interpolation: bilinear + normalization_type: cifar10 + split_names: [train, val, test] + num_classes: 12 # actually it's 10 classes but it has 2 groups + image_size: 32 + pre_size: 32 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/train_cifar10_mos.txt + batch_size: 128 + shuffle: True + interpolation: bilinear + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar10_mos.txt + batch_size: 128 + shuffle: False + interpolation: bilinear + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar10_mos.txt + batch_size: 128 + shuffle: False + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/cifar10/cifar10_extra.yml b/OpenOOD/configs/datasets/cifar10/cifar10_extra.yml new file mode 100644 index 0000000000000000000000000000000000000000..b1e0513824c89d4fe87d093ff1e012b57b3f16da --- /dev/null +++ b/OpenOOD/configs/datasets/cifar10/cifar10_extra.yml @@ -0,0 +1,37 @@ +dataset: + name: cifar10 + num_classes: 10 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistExtraDataDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/train_cifar10.txt + batch_size: 128 + shuffle: True + extra_data_pth: ./data/images_classic/cifar10_extra/stylegan_images.npy + extra_label_pth: ./data/images_classic/cifar10_extra/stylegan_labels.npy + extra_percent: 100 + orig_ratio: 0.8 + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar10.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar10.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/cifar10/cifar10_fsood.yml b/OpenOOD/configs/datasets/cifar10/cifar10_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..c92493ccf70a785f9e1a4585210057939badfe11 --- /dev/null +++ b/OpenOOD/configs/datasets/cifar10/cifar10_fsood.yml @@ -0,0 +1,43 @@ +ood_dataset: + name: cifar10_fsood + num_classes: 10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, nearood, farood, csid] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar100.txt + nearood: + datasets: [cifar100, tin] + cifar100: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar100.txt + tin: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_tin.txt + farood: + datasets: [mnist, svhn, texture, 
place365] + mnist: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_mnist.txt + svhn: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_svhn.txt + texture: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_texture.txt + place365: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_places365.txt + csid: + datasets: [cinic10] + cinic10: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/val_cinic10.txt diff --git a/OpenOOD/configs/datasets/cifar10/cifar10_oe.yml b/OpenOOD/configs/datasets/cifar10/cifar10_oe.yml new file mode 100644 index 0000000000000000000000000000000000000000..db413ec53dd569958a18e3107efbbd9332ad97d0 --- /dev/null +++ b/OpenOOD/configs/datasets/cifar10/cifar10_oe.yml @@ -0,0 +1,12 @@ +name: cifar10_oe + +dataset: + name: cifar10_oe + split_names: [train, oe, val, test] + oe: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/train_tin597.txt + batch_size: 256 + shuffle: True + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/cifar10/cifar10_ood.yml b/OpenOOD/configs/datasets/cifar10/cifar10_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..a31701c34c5d86c3430c3bc5a8449d96eea835dd --- /dev/null +++ b/OpenOOD/configs/datasets/cifar10/cifar10_ood.yml @@ -0,0 +1,38 @@ +ood_dataset: + name: cifar10_ood + num_classes: 10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, nearood, farood] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/val_tin.txt + nearood: + datasets: [cifar100, tin] + cifar100: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar100.txt + tin: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_tin.txt + farood: + datasets: [mnist, svhn, texture, place365] + mnist: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_mnist.txt + svhn: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_svhn.txt + texture: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_texture.txt + place365: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar10/test_places365.txt diff --git a/OpenOOD/configs/datasets/cifar100/cifar100.yml b/OpenOOD/configs/datasets/cifar100/cifar100.yml new file mode 100644 index 0000000000000000000000000000000000000000..238ba6fda6c650ee0ef90256672161d361009a9b --- /dev/null +++ b/OpenOOD/configs/datasets/cifar100/cifar100.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar100 + num_classes: 100 + image_size: 32 + pre_size: 32 + + interpolation: bilinear + normalization_type: cifar100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: False + test: + dataset_class: 
ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/cifar100/cifar100_double_label.yml b/OpenOOD/configs/datasets/cifar100/cifar100_double_label.yml new file mode 100644 index 0000000000000000000000000000000000000000..7fa4b111a2aea732585dae6153101b4117b5b399 --- /dev/null +++ b/OpenOOD/configs/datasets/cifar100/cifar100_double_label.yml @@ -0,0 +1,32 @@ +dataset: + name: cifar100_double_label + interpolation: bilinear + normalization_type: cifar100 + split_names: [train, val, test] + num_classes: 120 # actually it's 100 classes but it has 20 groups + image_size: 32 + pre_size: 32 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100_mos.txt + batch_size: 128 + shuffle: True + interpolation: bilinear + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100_mos.txt + batch_size: 128 + shuffle: False + interpolation: bilinear + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100_mos.txt + batch_size: 128 + shuffle: False + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/cifar100/cifar100_extra.yml b/OpenOOD/configs/datasets/cifar100/cifar100_extra.yml new file mode 100644 index 0000000000000000000000000000000000000000..7fb043ded729154d48df2d484ea2ee257c8fae42 --- /dev/null +++ b/OpenOOD/configs/datasets/cifar100/cifar100_extra.yml @@ -0,0 +1,37 @@ +dataset: + name: cifar100 + num_classes: 100 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistExtraDataDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: True + extra_data_pth: ./data/images_classic/cifar100_extra/stylegan_images.npy + extra_label_pth: ./data/images_classic/cifar100_extra/stylegan_labels.npy + extra_percent: 100 + orig_ratio: 0.8 + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/cifar100/cifar100_fsood.yml b/OpenOOD/configs/datasets/cifar100/cifar100_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..db9a33d409252a20d799d97628dce544b56bf2a1 --- /dev/null +++ b/OpenOOD/configs/datasets/cifar100/cifar100_fsood.yml @@ -0,0 +1,43 @@ +ood_dataset: + name: cifar100_ood + num_classes: 100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, nearood, farood, csid] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar10.txt + nearood: + datasets: [cifar10, tin] + cifar10: + data_dir: ./data/images_classic/ + imglist_pth: 
./data/benchmark_imglist/cifar100/test_cifar10.txt + tin: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_tin.txt + farood: + datasets: [mnist, svhn, texture, places365] + mnist: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_mnist.txt + svhn: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_svhn.txt + texture: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_texture.txt + places365: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_places365.txt + csid: + datasets: [cifar100c] + cifar100c: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100c.txt diff --git a/OpenOOD/configs/datasets/cifar100/cifar100_oe.yml b/OpenOOD/configs/datasets/cifar100/cifar100_oe.yml new file mode 100644 index 0000000000000000000000000000000000000000..e47aa1874e9042c36879d2d91cb34afb2523b074 --- /dev/null +++ b/OpenOOD/configs/datasets/cifar100/cifar100_oe.yml @@ -0,0 +1,12 @@ +name: cifar100_oe + +dataset: + name: cifar100_oe + split_names: [train, oe, val, test] + oe: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_tin597.txt + batch_size: 256 + shuffle: True + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/cifar100/cifar100_ood.yml b/OpenOOD/configs/datasets/cifar100/cifar100_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..6c8935d421f525349da69410b53e0d788eafd885 --- /dev/null +++ b/OpenOOD/configs/datasets/cifar100/cifar100_ood.yml @@ -0,0 +1,38 @@ +ood_dataset: + name: cifar100_ood + num_classes: 100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, nearood, farood] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_tin.txt + nearood: + datasets: [cifar10, tin] + cifar10: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar10.txt + tin: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_tin.txt + farood: + datasets: [mnist, svhn, texture, places365] + mnist: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_mnist.txt + svhn: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_svhn.txt + texture: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_texture.txt + places365: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_places365.txt diff --git a/OpenOOD/configs/datasets/covid/covid.yml b/OpenOOD/configs/datasets/covid/covid.yml new file mode 100644 index 0000000000000000000000000000000000000000..fb22c671ea2a9412e417dba2ce4a87f323f2463d --- /dev/null +++ b/OpenOOD/configs/datasets/covid/covid.yml @@ -0,0 +1,29 @@ +dataset: + name: covid + split_names: [train, val, test] + num_classes: 2 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + train: + dataset_class: ImglistDataset + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/train_bimcv.txt + batch_size: 128 + shuffle: True + interpolation: bilinear + val: + dataset_class: ImglistDataset + data_dir: 
./data/covid_images/ + imglist_pth: ./data/imglist/covid/val_bimcv.txt + batch_size: 200 + shuffle: False + interpolation: bilinear + test: + dataset_class: ImglistDataset + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/test_bimcv.txt + batch_size: 200 + shuffle: False + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/covid/covid_fsood.yml b/OpenOOD/configs/datasets/covid/covid_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..d75222e53f2933c754ab247e34d8c7b3d7a647b4 --- /dev/null +++ b/OpenOOD/configs/datasets/covid/covid_fsood.yml @@ -0,0 +1,47 @@ +ood_dataset: + name: covid_fsood + dataset_class: ImglistDataset + interpolation: bilinear + + batch_size: 20 + shuffle: False + num_classes: 2 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [val, csid, nearood, farood] + val: + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/val_ct.txt + csid: + datasets: [actmed, hannover] + actmed: + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/test_actmed.txt + hannover: + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/test_hannover.txt + nearood: + datasets: [ct, xraybone] + ct: + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/test_ct.txt + xraybone: + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/test_xraybone.txt + farood: + datasets: [mnist, cifar10, texture, tin] + mnist: + data_dir: ./data/images/ + imglist_pth: ./data/imglist/covid/test_mnist.txt + cifar10: + data_dir: ./data/images/ + imglist_pth: ./data/imglist/covid/test_cifar10.txt + texture: + data_dir: ./data/images/ + imglist_pth: ./data/imglist/covid/test_texture.txt + tin: + data_dir: ./data/images/ + imglist_pth: ./data/imglist/covid/test_tin.txt diff --git a/OpenOOD/configs/datasets/covid/covid_ood.yml b/OpenOOD/configs/datasets/covid/covid_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..b8a4bd7143fb32be7f3f6c384082126f6a7d487a --- /dev/null +++ b/OpenOOD/configs/datasets/covid/covid_ood.yml @@ -0,0 +1,39 @@ +ood_dataset: + name: covid_ood + dataset_class: ImglistDataset + interpolation: bilinear + + batch_size: 128 + shuffle: False + num_classes: 2 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [val, nearood, farood] + val: + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/val_ct.txt + nearood: + datasets: [ct, xraybone] + ct: + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/test_ct.txt + xraybone: + data_dir: ./data/covid_images/ + imglist_pth: ./data/imglist/covid/test_xraybone.txt + farood: + datasets: [mnist, cifar10, texture, tin] + mnist: + data_dir: ./data/images/ + imglist_pth: ./data/imglist/covid/test_mnist.txt + cifar10: + data_dir: ./data/images/ + imglist_pth: ./data/imglist/covid/test_cifar10.txt + texture: + data_dir: ./data/images/ + imglist_pth: ./data/imglist/covid/test_texture.txt + tin: + data_dir: ./data/images/ + imglist_pth: ./data/imglist/covid/test_tin.txt diff --git a/OpenOOD/configs/datasets/imagenet/imagenet.yml b/OpenOOD/configs/datasets/imagenet/imagenet.yml new file mode 100644 index 0000000000000000000000000000000000000000..4a033e320bb09373d7e9d04e8e96cc4a5418e008 --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet/imagenet.yml @@ -0,0 +1,33 @@ +dataset: + name: imagenet + num_classes: 1000 + pre_size: 256 + 
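+  # Editor's note (assumption about OpenOOD preprocessing): images are
+  # typically resized to `pre_size` first and then cropped to `image_size`
+  # before being fed to the network.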
image_size: 224 + + interpolation: bilinear + normalization_type: imagenet + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: False diff --git a/OpenOOD/configs/datasets/imagenet/imagenet_double_label.yml b/OpenOOD/configs/datasets/imagenet/imagenet_double_label.yml new file mode 100644 index 0000000000000000000000000000000000000000..30d8dd1e98f39134d151ada87a3f0d485b081866 --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet/imagenet_double_label.yml @@ -0,0 +1,32 @@ +dataset: + name: imagenet_double_label + interpolation: bilinear + normalization_type: imagenet + split_names: [train, val, test] + num_classes: 1008 # actually it's 1000 classes but it has 8 groups + image_size: 224 + pre_size: 256 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet_mos.txt + batch_size: 256 + shuffle: True + interpolation: bilinear + val: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet_mos.txt + batch_size: 256 + shuffle: False + interpolation: bilinear + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_mos.txt + batch_size: 256 + shuffle: False + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/imagenet/imagenet_double_label_fsood.yml b/OpenOOD/configs/datasets/imagenet/imagenet_double_label_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..1dd88e04aec37d6d4c0423e4d4f85c024d00f620 --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet/imagenet_double_label_fsood.yml @@ -0,0 +1,48 @@ +ood_dataset: + name: imagenet_fsood + num_classes: 200 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 256 + shuffle: False + + pre_size: 256 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + split_names: [val, nearood, farood, csid] + val: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_openimage_o.txt + nearood: + datasets: [ssb_hard, ninco] + ssb_hard: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt + ninco: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt + farood: + datasets: [inaturalist, textures, openimageo] + textures: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt + inaturalist: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt + openimageo: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt + csid: + datasets: 
[imagenetv2, imagenetc, imagenetr] + imagenetv2: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_v2_mos.txt + imagenetc: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_c_mos.txt + imagenetr: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_r_mos.txt diff --git a/OpenOOD/configs/datasets/imagenet/imagenet_fsood.yml b/OpenOOD/configs/datasets/imagenet/imagenet_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..bf2fb77efc3f88b61a891a2fe7db78ff6cb1b9fe --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet/imagenet_fsood.yml @@ -0,0 +1,48 @@ +ood_dataset: + name: imagenet_ood + num_classes: 1000 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 32 + shuffle: False + + pre_size: 256 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + split_names: [val, nearood, farood, csid] + val: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_openimage_o.txt + nearood: + datasets: [ssb_hard, ninco] + ssb_hard: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt + ninco: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt + farood: + datasets: [inaturalist, textures, openimageo] + textures: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt + inaturalist: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt + openimageo: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt + csid: + datasets: [imagenetv2, imagenetc, imagenetr] + imagenetv2: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_v2.txt + imagenetc: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_c.txt + imagenetr: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_r.txt diff --git a/OpenOOD/configs/datasets/imagenet/imagenet_ood.yml b/OpenOOD/configs/datasets/imagenet/imagenet_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..928c49a5fed44b8bbdcd47c3eeef68b820cd796d --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet/imagenet_ood.yml @@ -0,0 +1,37 @@ +ood_dataset: + name: imagenet_ood + num_classes: 1000 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 32 + shuffle: False + + pre_size: 256 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + split_names: [val, nearood, farood] + val: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_openimage_o.txt + nearood: + datasets: [ssb_hard, ninco] + ssb_hard: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt + ninco: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt + farood: + datasets: [inaturalist, textures, openimageo] + textures: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt + inaturalist: + data_dir: ./data/images_largescale/ + imglist_pth: 
./data/benchmark_imglist/imagenet/test_inaturalist.txt + openimageo: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt diff --git a/OpenOOD/configs/datasets/imagenet200/imagenet200.yml b/OpenOOD/configs/datasets/imagenet200/imagenet200.yml new file mode 100644 index 0000000000000000000000000000000000000000..06fe5799b92f04618e8330ab2fcb760f74bd2512 --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet200/imagenet200.yml @@ -0,0 +1,33 @@ +dataset: + name: imagenet200 + num_classes: 200 + pre_size: 256 + image_size: 224 + + interpolation: bilinear + normalization_type: imagenet + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/train_imagenet200.txt + batch_size: 256 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/val_imagenet200.txt + batch_size: 256 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200.txt + batch_size: 256 + shuffle: False diff --git a/OpenOOD/configs/datasets/imagenet200/imagenet200_double_label.yml b/OpenOOD/configs/datasets/imagenet200/imagenet200_double_label.yml new file mode 100644 index 0000000000000000000000000000000000000000..48c8e3292727694a4ba8b0241fb70eee5f066431 --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet200/imagenet200_double_label.yml @@ -0,0 +1,32 @@ +dataset: + name: imagenet200_double_label + interpolation: bilinear + normalization_type: imagenet + split_names: [train, val, test] + num_classes: 206 # actually it's 200 classes but it has 6 groups + image_size: 224 + pre_size: 256 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/train_imagenet200_mos.txt + batch_size: 256 + shuffle: True + interpolation: bilinear + val: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/val_imagenet200_mos.txt + batch_size: 256 + shuffle: False + interpolation: bilinear + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_mos.txt + batch_size: 256 + shuffle: False + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/imagenet200/imagenet200_double_label_fsood.yml b/OpenOOD/configs/datasets/imagenet200/imagenet200_double_label_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..7dccaf6ed151a03e1fed0ecd9e5dfc4bddaabdee --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet200/imagenet200_double_label_fsood.yml @@ -0,0 +1,48 @@ +ood_dataset: + name: imagenet200_fsood + num_classes: 200 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 256 + shuffle: False + + pre_size: 256 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + split_names: [val, nearood, farood, csid] + val: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/val_openimage_o.txt + nearood: + datasets: [ssb_hard, ninco] + ssb_hard: + data_dir: 
./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt + ninco: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt + farood: + datasets: [inaturalist, textures, openimageo] + textures: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt + inaturalist: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt + openimageo: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt + csid: + datasets: [imagenetv2, imagenetc, imagenetr] + imagenetv2: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_v2_mos.txt + imagenetc: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_c_mos.txt + imagenetr: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_r_mos.txt diff --git a/OpenOOD/configs/datasets/imagenet200/imagenet200_fsood.yml b/OpenOOD/configs/datasets/imagenet200/imagenet200_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..1b5f1cc1631906a6000bbeee73232417ee0f4afe --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet200/imagenet200_fsood.yml @@ -0,0 +1,48 @@ +ood_dataset: + name: imagenet200_fsood + num_classes: 200 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 256 + shuffle: False + + pre_size: 256 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + split_names: [val, nearood, farood, csid] + val: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/val_openimage_o.txt + nearood: + datasets: [ssb_hard, ninco] + ssb_hard: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt + ninco: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt + farood: + datasets: [inaturalist, textures, openimageo] + textures: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt + inaturalist: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt + openimageo: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt + csid: + datasets: [imagenetv2, imagenetc, imagenetr] + imagenetv2: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_v2.txt + imagenetc: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_c.txt + imagenetr: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_r.txt diff --git a/OpenOOD/configs/datasets/imagenet200/imagenet200_oe.yml b/OpenOOD/configs/datasets/imagenet200/imagenet200_oe.yml new file mode 100644 index 0000000000000000000000000000000000000000..3c4edfb701357e31dcd01bf79d6a805e023fe117 --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet200/imagenet200_oe.yml @@ -0,0 +1,12 @@ +name: imagenet200_oe + +dataset: + name: imagenet200_oe + split_names: [train, oe, val, test] + oe: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/train_imagenet800.txt + 
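+    # Editor's note: the `oe` split supplies auxiliary outlier images for
+    # Outlier Exposure training; judging by the file name, these appear to be
+    # the 800 ImageNet-1K classes disjoint from the 200 ID classes.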
batch_size: 256 + shuffle: True + interpolation: bilinear diff --git a/OpenOOD/configs/datasets/imagenet200/imagenet200_ood.yml b/OpenOOD/configs/datasets/imagenet200/imagenet200_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..0db7c77fb2493869518ede765d6d30b4d34ea9e2 --- /dev/null +++ b/OpenOOD/configs/datasets/imagenet200/imagenet200_ood.yml @@ -0,0 +1,37 @@ +ood_dataset: + name: imagenet200_ood + num_classes: 200 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 256 + shuffle: False + + pre_size: 256 + image_size: 224 + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + split_names: [val, nearood, farood] + val: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet200/val_openimage_o.txt + nearood: + datasets: [ssb_hard, ninco] + ssb_hard: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt + ninco: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt + farood: + datasets: [inaturalist, textures, openimageo] + textures: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt + inaturalist: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt + openimageo: + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt diff --git a/OpenOOD/configs/datasets/mnist/mnist.yml b/OpenOOD/configs/datasets/mnist/mnist.yml new file mode 100644 index 0000000000000000000000000000000000000000..13884d9c2488febebfcb45fd5010f9e78a39ba71 --- /dev/null +++ b/OpenOOD/configs/datasets/mnist/mnist.yml @@ -0,0 +1,33 @@ +dataset: + name: mnist + num_classes: 10 + image_size: 28 + pre_size: 28 + + interpolation: bilinear + normalization_type: mnist + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/train_mnist.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/val_mnist.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_mnist.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/mnist/mnist_fsood.yml b/OpenOOD/configs/datasets/mnist/mnist_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..1e07ecf54af8a384f7b566c4a884254e730920fc --- /dev/null +++ b/OpenOOD/configs/datasets/mnist/mnist_fsood.yml @@ -0,0 +1,43 @@ +ood_dataset: + name: mnist_fsood + num_classes: 10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, nearood, farood, csid] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/val_mnist.txt + nearood: + datasets: [notmnist, fashionmnist] + notmnist: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_notmnist.txt + fashionmnist: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_fashionmnist.txt + farood: + datasets: [texture, 
cifar10, tin, places365] + texture: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_texture.txt + cifar10: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_cifar10.txt + tin: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_tin.txt + places365: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_places365.txt + csid: + datasets: [svhn] + svhn: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_svhn.txt diff --git a/OpenOOD/configs/datasets/mnist/mnist_ood.yml b/OpenOOD/configs/datasets/mnist/mnist_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..73a980848ac11abe9d39fab5bd87763d602ad965 --- /dev/null +++ b/OpenOOD/configs/datasets/mnist/mnist_ood.yml @@ -0,0 +1,38 @@ +ood_dataset: + name: mnist_ood + num_classes: 10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, nearood, farood] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/val_notmnist.txt + nearood: + datasets: [notmnist, fashionmnist] + notmnist: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_notmnist.txt + fashionmnist: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_fashionmnist.txt + farood: + datasets: [texture, cifar10, tin, places365] + texture: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_texture.txt + cifar10: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_cifar10.txt + tin: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_tin.txt + places365: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/mnist/test_places365.txt diff --git a/OpenOOD/configs/datasets/mvtec/bottle.yml b/OpenOOD/configs/datasets/mvtec/bottle.yml new file mode 100644 index 0000000000000000000000000000000000000000..23289659f33070f0c60a772ca9fb501a3b8a009b --- /dev/null +++ b/OpenOOD/configs/datasets/mvtec/bottle.yml @@ -0,0 +1,52 @@ +dataset: + name: bottle + num_classes: 2 + pre_size: 256 + image_size: 256 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, test, val] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images/ + interpolation: bilinear + imglist_pth: ./data/benchmark_imglist/mvtecList/bottle_train_good.txt + batch_size: 2 + shuffle: True + test: + dataset_class: ImglistDataset + data_dir: ./data/images/ + interpolation: bilinear + imglist_pth: ./data/benchmark_imglist/mvtecList/bottle_test_id.txt + batch_size: 1 + shuffle: False + val: + dataset_class: ImglistDataset + data_dir: ./data/images/ + interpolation: bilinear + imglist_pth: ./data/benchmark_imglist/mvtecList/bottle_test_id.txt + batch_size: 1 + shuffle: False + +ood_dataset: + name: bottle_ood + num_classes: 2 + image_size: 256 + num_workers: 4 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 1 + shuffle: False + + split_names: [val] + val: + data_dir: ./data/images/ + imglist_pth: ./data/benchmark_imglist/mvtecList/bottle_test.txt diff --git a/OpenOOD/configs/datasets/mvtec/cable.yml b/OpenOOD/configs/datasets/mvtec/cable.yml new 
file mode 100644 index 0000000000000000000000000000000000000000..16e0c90b1c47819853ee795b700f30ce7807f40b --- /dev/null +++ b/OpenOOD/configs/datasets/mvtec/cable.yml @@ -0,0 +1,52 @@ +dataset: + name: cable + num_classes: 2 + pre_size: 256 + image_size: 256 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, test, val] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images/ + interpolation: bilinear + imglist_pth: ./data/benchmark_imglist/mvtecList/cable_train_good.txt + batch_size: 2 + shuffle: True + test: + dataset_class: ImglistDataset + data_dir: ./data/images/ + interpolation: bilinear + imglist_pth: ./data/benchmark_imglist/mvtecList/cable_test_id.txt + batch_size: 1 + shuffle: False + val: + dataset_class: ImglistDataset + data_dir: ./data/images/ + interpolation: bilinear + imglist_pth: ./data/benchmark_imglist/mvtecList/cable_test_id.txt + batch_size: 1 + shuffle: False + +ood_dataset: + name: cable_ood + num_classes: 2 + image_size: 256 + num_workers: 4 + + dataset_class: ImglistDataset + interpolation: bilinear + batch_size: 1 + shuffle: False + + split_names: [val] + val: + data_dir: ./data/images/ + imglist_pth: ./data/benchmark_imglist/mvtecList/cable_test.txt diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed1.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed1.yml new file mode 100644 index 0000000000000000000000000000000000000000..ef54e476b87302280e2599068f0e11f269904843 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed1.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar50_seed1 + num_classes: 50 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed1.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed1.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed1.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed1_osr.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed1_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..7bb218410a0fec4bffcad49a628c4d2c9b436c0e --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed1_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar50_seed1_osr + num_classes: 50 + pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed1.txt + osr: + datasets: [cifar50] + cifar50: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed1.txt diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed2.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed2.yml 
new file mode 100644 index 0000000000000000000000000000000000000000..5fd50428531c10b5f8672968d7f114734723eab1 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed2.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar50_seed2 + num_classes: 50 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed2.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed2.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed2.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed2_osr.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed2_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..29dd2e612be8e14da0779df465b294b24abcfc08 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed2_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar50_seed2_osr + num_classes: 50 + pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed2.txt + osr: + datasets: [cifar50] + cifar50: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed2.txt diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed3.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed3.yml new file mode 100644 index 0000000000000000000000000000000000000000..f7e5d11b53e50cd83105073ccfcfbef958dfa604 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed3.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar50_seed3 + num_classes: 50 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed3.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed3.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed3.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed3_osr.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed3_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..f0f942f2bef74f75b1100a370f4ec7b237808b2c --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed3_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar50_seed3_osr + num_classes: 50 + 
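+  # Editor's note: in the osr_cifar50 open-set benchmark, 50 of CIFAR-100's
+  # classes serve as in-distribution; each `seedN` config appears to use a
+  # different random class split (see the per-seed imglist files below).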
pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed3.txt + osr: + datasets: [cifar50] + cifar50: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed3.txt diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed4.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed4.yml new file mode 100644 index 0000000000000000000000000000000000000000..99c2a78c039fffdf1705ec9d9eb146898c4a5a2e --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed4.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar50_seed4 + num_classes: 50 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed4.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed4.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed4.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed4_osr.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed4_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..a3dac495758c72bbf5d39f727a1d6c8b8812d3f6 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed4_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar50_seed4_osr + num_classes: 50 + pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed4.txt + osr: + datasets: [cifar50] + cifar50: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed4.txt diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed5.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed5.yml new file mode 100644 index 0000000000000000000000000000000000000000..d93d1dfab0a5163ac7f98e2bf196ddda1d974022 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed5.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar50_seed5 + num_classes: 50 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar100 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed5.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: 
./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed5.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed5.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed5_osr.yml b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed5_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..cff3e2c185fed895ddc01c1ee49711f748b1baa6 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar50/cifar50_seed5_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar50_seed5_osr + num_classes: 50 + pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed5.txt + osr: + datasets: [cifar50] + cifar50: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed5.txt diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed1.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed1.yml new file mode 100644 index 0000000000000000000000000000000000000000..f6318ecd14cab26fde27d73a970a044dd7a721e9 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed1.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar6_seed1 + num_classes: 6 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/train/train_cifar10_6_seed1.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/val/val_cifar10_6_seed1.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed1.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed1_osr.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed1_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..389a42bd377fbdce3d3b5b533d0cc36993aa7146 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed1_osr.yml @@ -0,0 +1,21 @@ +ood_dataset: + name: cifar6_seed1_osr + num_classes: 6 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed1.txt + osr: + datasets: [cifar4] + cifar4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_4_ood_seed1.txt diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed2.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed2.yml new file mode 100644 index 0000000000000000000000000000000000000000..4d50470b1ce5b52c80bf3a51b431b670ad2b84bc --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed2.yml @@ -0,0 
+1,33 @@ +dataset: + name: cifar6_seed2 + num_classes: 6 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/train/train_cifar10_6_seed2.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/val/val_cifar10_6_seed2.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed2.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed2_osr.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed2_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..588e4e131a684913f73b68580eb752449761fd90 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed2_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar6_seed2_osr + num_classes: 6 + pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed2.txt + osr: + datasets: [cifar4] + cifar4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_4_ood_seed2.txt diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed3.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed3.yml new file mode 100644 index 0000000000000000000000000000000000000000..af530a8f8730aae8c1c070fd1704529378c96db5 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed3.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar6_seed3 + num_classes: 6 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/train/train_cifar10_6_seed3.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/val/val_cifar10_6_seed3.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed3.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed3_osr.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed3_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..d7b90834b99eb4c3130c36bbb32f4679adbd76d4 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed3_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar6_seed3_osr + num_classes: 6 + pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: 
./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed3.txt + osr: + datasets: [cifar4] + cifar4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_4_ood_seed3.txt diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed4.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed4.yml new file mode 100644 index 0000000000000000000000000000000000000000..665607e6a37ff287a8c66b336115a7499a80cf84 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed4.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar6_seed4 + num_classes: 6 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/train/train_cifar10_6_seed4.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/val/val_cifar10_6_seed4.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed4.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed4_osr.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed4_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..413b3c5b3b89c5430d8d0ab6e00d932a3f1b7b43 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed4_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar6_seed4_osr + num_classes: 6 + pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed4.txt + osr: + datasets: [cifar4] + cifar4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_4_ood_seed4.txt diff --git a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed5.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed5.yml new file mode 100644 index 0000000000000000000000000000000000000000..102f614ca7a1f97b6a35dbe0f99a7061e1cce94c --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed5.yml @@ -0,0 +1,33 @@ +dataset: + name: cifar6_seed5 + num_classes: 6 + pre_size: 32 + image_size: 32 + + interpolation: bilinear + normalization_type: cifar10 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/train/train_cifar10_6_seed5.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/val/val_cifar10_6_seed5.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed5.txt + batch_size: 200 + shuffle: False diff --git 
a/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed5_osr.yml b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed5_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..1138fa3437661afa36d13be6044bfa55b44e19e0 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_cifar6/cifar6_seed5_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: cifar6_seed5_osr + num_classes: 6 + pre_size: 32 + image_size: 32 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed5.txt + osr: + datasets: [cifar4] + cifar4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_4_ood_seed5.txt diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed1.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed1.yml new file mode 100644 index 0000000000000000000000000000000000000000..da655b3b4ec326d8f3e3ff4cccd90704349b01d3 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed1.yml @@ -0,0 +1,33 @@ +dataset: + name: mnist6_seed1 + num_classes: 6 + pre_size: 28 + image_size: 28 + + interpolation: bilinear + normalization_type: mnist + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/train/train_mnist_6_seed1.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/val/val_mnist_6_seed1.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed1.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed1_osr.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed1_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..42550ccd4b112f665dbf717c1af954e090f12bc6 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed1_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: mnist6_seed1_osr + num_classes: 6 + pre_size: 28 + image_size: 28 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed1.txt + osr: + datasets: [mnist4] + mnist4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_4_ood_seed1.txt diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed2.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed2.yml new file mode 100644 index 0000000000000000000000000000000000000000..ce155496e80c2540293541b7e11df21e9be9d6c9 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed2.yml @@ -0,0 +1,33 @@ +dataset: + name: mnist6_seed2 + num_classes: 6 + pre_size: 28 + image_size: 28 + + interpolation: bilinear + normalization_type: mnist + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: 
ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/train/train_mnist_6_seed2.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/val/val_mnist_6_seed2.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed2.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed2_osr.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed2_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..4f16d5b4057512a4ab7d325250b747cd32de640b --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed2_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: mnist6_seed2_osr + num_classes: 6 + pre_size: 28 + image_size: 28 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed2.txt + osr: + datasets: [mnist4] + mnist4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_4_ood_seed2.txt diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed3.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed3.yml new file mode 100644 index 0000000000000000000000000000000000000000..685394d76508f231367d5c72955ada25e7a025b9 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed3.yml @@ -0,0 +1,33 @@ +dataset: + name: mnist6_seed3 + num_classes: 6 + pre_size: 28 + image_size: 28 + + interpolation: bilinear + normalization_type: mnist + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/train/train_mnist_6_seed3.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/val/val_mnist_6_seed3.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed3.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed3_osr.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed3_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..17c4bbf5b490b757987225dc860eeb51b006d68e --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed3_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: mnist6_seed3_osr + num_classes: 6 + pre_size: 28 + image_size: 28 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed3.txt + osr: + datasets: [mnist4] + mnist4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_4_ood_seed3.txt diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed4.yml 
b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed4.yml new file mode 100644 index 0000000000000000000000000000000000000000..1ceb782dd0b3c7ec4a8e56f8a566765d95c2ae74 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed4.yml @@ -0,0 +1,33 @@ +dataset: + name: mnist6_seed4 + num_classes: 6 + pre_size: 28 + image_size: 28 + + interpolation: bilinear + normalization_type: mnist + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/train/train_mnist_6_seed4.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/val/val_mnist_6_seed4.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed4.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed4_osr.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed4_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..68367d7516008b84ddecd8e9d8f79b4b65976ccd --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed4_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: mnist6_seed4_osr + num_classes: 6 + pre_size: 28 + image_size: 28 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed4.txt + osr: + datasets: [mnist4] + mnist4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_4_ood_seed4.txt diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed5.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed5.yml new file mode 100644 index 0000000000000000000000000000000000000000..fa02c6a7a54885c5fd0f835239848c84da11b3be --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed5.yml @@ -0,0 +1,33 @@ +dataset: + name: mnist6_seed5 + num_classes: 6 + pre_size: 28 + image_size: 28 + + interpolation: bilinear + normalization_type: mnist + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/train/train_mnist_6_seed5.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/val/val_mnist_6_seed5.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed5.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed5_osr.yml b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed5_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..0227732441699cf598959d640ab2adf50a718e86 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_mnist6/mnist6_seed5_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: mnist6_seed5_osr + num_classes: 6 + pre_size: 28 + 
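The imglist_pth files referenced throughout are plain-text index files. Assuming the common one-sample-per-line layout of a relative image path followed by an integer label (ImglistDataset itself layers image decoding and transforms on top; this reader is only illustrative), a minimal parser might look like:

```python
from pathlib import Path

def read_imglist(imglist_pth, data_dir):
    """Parse one 'relative/image/path <label>' pair per line into (path, label) tuples."""
    samples = []
    for line in Path(imglist_pth).read_text().splitlines():
        line = line.strip()
        if not line:
            continue  # skip blank lines
        rel_path, label = line.rsplit(maxsplit=1)
        samples.append((Path(data_dir) / rel_path, int(label)))
    return samples

# e.g. read_imglist('./data/benchmark_imglist/osr_mnist6/train/train_mnist_6_seed1.txt',
#                   './data/images_classic/')
```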
image_size: 28 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_6_id_seed5.txt + osr: + datasets: [mnist4] + mnist4: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_mnist6/test/test_mnist_4_ood_seed5.txt diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed1.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed1.yml new file mode 100644 index 0000000000000000000000000000000000000000..2002fe887ef74228b8b51c25f682a70d45ae8d57 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed1.yml @@ -0,0 +1,33 @@ +dataset: + name: tin20_seed1 + num_classes: 20 + pre_size: 64 + image_size: 64 + + interpolation: bilinear + normalization_type: imagenet + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/train/train_tin_20_seed1.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/val/val_tin_20_seed1.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed1.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed1_osr.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed1_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..5c685a554bde7b362c5951ef8367e63d91fbac97 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed1_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: tin20_seed1_osr + num_classes: 20 + pre_size: 64 + image_size: 64 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed1.txt + osr: + datasets: [tin180] + tin180: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_180_ood_seed1.txt diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed2.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed2.yml new file mode 100644 index 0000000000000000000000000000000000000000..6e94c2b31cac37b675712cb72715360ff175e83b --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed2.yml @@ -0,0 +1,33 @@ +dataset: + name: tin20_seed2 + num_classes: 20 + pre_size: 64 + image_size: 64 + + interpolation: bilinear + normalization_type: imagenet + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/train/train_tin_20_seed2.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/val/val_tin_20_seed2.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + 
imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed2.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed2_osr.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed2_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..4107527c2fb16c97da1ad06e55afeb0d4f01c81f --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed2_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: tin20_seed2_osr + num_classes: 20 + pre_size: 64 + image_size: 64 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed2.txt + osr: + datasets: [tin180] + tin180: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_180_ood_seed2.txt diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed3.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed3.yml new file mode 100644 index 0000000000000000000000000000000000000000..d1de49427829524791b65f1c9145c9d1a30fd9d0 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed3.yml @@ -0,0 +1,33 @@ +dataset: + name: tin20_seed3 + num_classes: 20 + pre_size: 64 + image_size: 64 + + interpolation: bilinear + normalization_type: imagenet + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/train/train_tin_20_seed3.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/val/val_tin_20_seed3.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed3.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed3_osr.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed3_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..0e0d900ffb2167acecaca993692baa9a02ba0033 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed3_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: tin20_seed3_osr + num_classes: 20 + pre_size: 64 + image_size: 64 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed3.txt + osr: + datasets: [tin180] + tin180: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_180_ood_seed3.txt diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed4.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed4.yml new file mode 100644 index 0000000000000000000000000000000000000000..c4fbe06588ae9045112e19f92b525d4de443a4ba --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed4.yml @@ -0,0 +1,33 @@ +dataset: + name: tin20_seed4 + num_classes: 20 + pre_size: 64 + image_size: 64 + + interpolation: bilinear + normalization_type: imagenet + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + 
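Each OSR benchmark here ships five splits whose config files differ only in the seed suffix, so a sweep over benchmarks and seeds reduces to formatting names. The main.py invocation printed below is a placeholder, not a confirmed entry point:

```python
# Enumerate the per-seed dataset/OSR config pairs added in this diff.
benchmarks = ['osr_mnist6/mnist6', 'osr_cifar6/cifar6',
              'osr_cifar50/cifar50', 'osr_tin20/tin20']
for bench in benchmarks:
    for seed in range(1, 6):
        print(f'python main.py '
              f'--config configs/datasets/{bench}_seed{seed}.yml '
              f'configs/datasets/{bench}_seed{seed}_osr.yml')
```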
num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/train/train_tin_20_seed4.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/val/val_tin_20_seed4.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed4.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed4_osr.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed4_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..861db0ba2db8d7f34f4e2b1e3cd8f0054c212ee1 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed4_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: tin20_seed4_osr + num_classes: 20 + pre_size: 64 + image_size: 64 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed4.txt + osr: + datasets: [tin180] + tin180: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_180_ood_seed4.txt diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed5.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed5.yml new file mode 100644 index 0000000000000000000000000000000000000000..0284e64fc3d1ca846f224afb143628813083f243 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed5.yml @@ -0,0 +1,33 @@ +dataset: + name: tin20_seed5 + num_classes: 20 + pre_size: 64 + image_size: 64 + + interpolation: bilinear + normalization_type: imagenet + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + split_names: [train, val, test] + + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/train/train_tin_20_seed5.txt + batch_size: 128 + shuffle: True + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/val/val_tin_20_seed5.txt + batch_size: 200 + shuffle: False + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed5.txt + batch_size: 200 + shuffle: False diff --git a/OpenOOD/configs/datasets/osr_tin20/tin20_seed5_osr.yml b/OpenOOD/configs/datasets/osr_tin20/tin20_seed5_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..845876d60708b4b68d1b5b75cf61e7300dc11455 --- /dev/null +++ b/OpenOOD/configs/datasets/osr_tin20/tin20_seed5_osr.yml @@ -0,0 +1,23 @@ +ood_dataset: + name: tin20_seed5_osr + num_classes: 20 + pre_size: 64 + image_size: 64 + + num_workers: '@{num_workers}' + num_gpus: '@{num_gpus}' + num_machines: '@{num_machines}' + + dataset_class: ImglistDataset + batch_size: 128 + shuffle: False + + split_names: [val, osr] + val: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_20_id_seed5.txt + osr: + datasets: [tin180] + tin180: + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/osr_tin20/test/test_tin_180_ood_seed5.txt diff 
--git a/OpenOOD/configs/networks/Bronze2_OursNetwork.yml b/OpenOOD/configs/networks/Bronze2_OursNetwork.yml new file mode 100644 index 0000000000000000000000000000000000000000..e9f59acd745a48b9d398266c07e1e9a9e6916805 --- /dev/null +++ b/OpenOOD/configs/networks/Bronze2_OursNetwork.yml @@ -0,0 +1,18 @@ +network: + name: OursBronze2 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True # set 'True' to load pretrained model + + # # default pretrained model: https://download.pytorch.org/models/resnet50-0676ba61.pth + checkpoint: "/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_ours_resnet50_415_NotLine_train/s0/model_state_dict_epoch90.pth" # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + # network used for feature extraction + # model_config: + # model_path: "/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze_2_ours_resnet50_415_train/s0/model_state_dict.pth" + # num_classes: '@{dataset.num_classes}' + # image_size: '@{dataset.image_size}' + # pretrained: True + # checkpoint: '/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/pretrained_weights/resnet50_imagenet1k_v1.pth' + # num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/OursBronze2.yml b/OpenOOD/configs/networks/OursBronze2.yml new file mode 100644 index 0000000000000000000000000000000000000000..92b89b0496a5a8e5d77425a0b2467b9e9015e399 --- /dev/null +++ b/OpenOOD/configs/networks/OursBronze2.yml @@ -0,0 +1,18 @@ +network: + name: OursBronze2 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True # set 'True' to load pretrained model + + # default pretrained model: https://download.pytorch.org/models/resnet50-0676ba61.pth + checkpoint: ./checkpoints/resnet50-0676ba61.pth # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + # network used for feature extraction + backbone: + name: resnet50 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True + checkpoint: '/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/pretrained_weights/resnet50_imagenet1k_v1.pth' + num_gpus: '@{num_gpus}' \ No newline at end of file diff --git a/OpenOOD/configs/networks/arpl_gan.yml b/OpenOOD/configs/networks/arpl_gan.yml new file mode 100644 index 0000000000000000000000000000000000000000..e03e1550a85279c94e6eccb29090c4659883a1dd --- /dev/null +++ b/OpenOOD/configs/networks/arpl_gan.yml @@ -0,0 +1,28 @@ +network: + name: arpl_gan + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + # Number of channels in the training images. For color images this is 3 + nc: 3 + # Size of z latent vector (i.e. 
size of generator input) + nz: 100 + # Size of feature maps in generator + ngf: 64 + # Size of feature maps in discriminator + ndf : 64 + ns: 1 + + weight_pl: 0.1 + temp: 1.0 + + # network used for feature extraction + feat_extract_network: + name: resnet34ABN + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/arpl_net.yml b/OpenOOD/configs/networks/arpl_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..cc16e3dcf552a440cef2bcba2d02ab1624f332d9 --- /dev/null +++ b/OpenOOD/configs/networks/arpl_net.yml @@ -0,0 +1,18 @@ +network: + name: arpl_net + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + weight_pl: 0.1 + temp: 1.0 + + # network used for feature extraction + feat_extract_network: + name: lenet + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/bit.yml b/OpenOOD/configs/networks/bit.yml new file mode 100644 index 0000000000000000000000000000000000000000..8c93d09f37d3ca65f2209f8aefe047426b40460f --- /dev/null +++ b/OpenOOD/configs/networks/bit.yml @@ -0,0 +1,12 @@ +network: + name: bit + model: BiT-S-R101x1 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True # set 'True' to load pretrained model + checkpoint: ./bit_pretrained_models/BiT-S-R101x1.npz # ignore if pretrained is false + num_gpus: '@{num_gpus}' +dataset: + image_size: 480 +ood_dataset: + image_size: 480 diff --git a/OpenOOD/configs/networks/cider_net.yml b/OpenOOD/configs/networks/cider_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..a923f56629cbe00d0cc12c85be18af352a728480 --- /dev/null +++ b/OpenOOD/configs/networks/cider_net.yml @@ -0,0 +1,17 @@ +network: + name: cider_net + num_classes: '@{dataset.num_classes}' + pretrained: False # In training pipeline:"False"; In testing pipeline:"True" + num_gpus: '@{num_gpus}' + checkpoint: none + + feat_dim: 128 + head: mlp + + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/conf_branch.yml b/OpenOOD/configs/networks/conf_branch.yml new file mode 100644 index 0000000000000000000000000000000000000000..2b8926f1b1b3b6048c0deff961bc416385cc4dc8 --- /dev/null +++ b/OpenOOD/configs/networks/conf_branch.yml @@ -0,0 +1,13 @@ +network: + name: conf_branch_net + num_classes: '@{dataset.num_classes}' + pretrained: False # In training pipeline:"False"; In testing pipeline:"True" + num_gpus: '@{num_gpus}' + checkpoint: none + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: ./results/mnist_conf_net_conf_esti/best.pth + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/csi_net.yml b/OpenOOD/configs/networks/csi_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..520bf8255f8e36eca6319edbb6a572bca7ad737e --- /dev/null +++ b/OpenOOD/configs/networks/csi_net.yml @@ -0,0 +1,16 @@ +network: + name: csi_net + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: 
./results/cifar10_csinet_csi_step2_e100_lr0.1/best.ckpt + num_gpus: '@{num_gpus}' + simclr_dim: 128 # Dimension of simclr layer + shift_trans_type: rotation # choice ['rotation', 'cutperm', 'none'] + + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/cutpaste.yml b/OpenOOD/configs/networks/cutpaste.yml new file mode 100644 index 0000000000000000000000000000000000000000..d2b447595cc2d28758daff6b06503371007f41da --- /dev/null +++ b/OpenOOD/configs/networks/cutpaste.yml @@ -0,0 +1,15 @@ +network: + name: projectionNet + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + # network used for feature extraction + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True + checkpoint: 'results/cifar100_resnet18_32x32_base_e100_lr0.1/best.ckpt' + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/dcae.yml b/OpenOOD/configs/networks/dcae.yml new file mode 100644 index 0000000000000000000000000000000000000000..814469fb3e62ac713b442f555d500285ef26d7d8 --- /dev/null +++ b/OpenOOD/configs/networks/dcae.yml @@ -0,0 +1,6 @@ +network: + name: dcae + type: cifar10_LeNet + num_classes: '@{dataset.num_classes}' + num_gpus: 1 + pretrained: False diff --git a/OpenOOD/configs/networks/draem.yml b/OpenOOD/configs/networks/draem.yml new file mode 100644 index 0000000000000000000000000000000000000000..43723762eac1f4ae02f6869207a1bd57e6449e85 --- /dev/null +++ b/OpenOOD/configs/networks/draem.yml @@ -0,0 +1,10 @@ +network: + name: draem + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False # set 'True' to load pretrained model + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + +use_gt: False +image_auroc_only: True diff --git a/OpenOOD/configs/networks/dropout_net.yml b/OpenOOD/configs/networks/dropout_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..53f4424e1b747c0b5f137220ffa4d7d506941ef6 --- /dev/null +++ b/OpenOOD/configs/networks/dropout_net.yml @@ -0,0 +1,17 @@ +network: + name: dropout_net + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + dropout_p: 0.5 + + # network used for feature extraction + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/dsvdd.yml b/OpenOOD/configs/networks/dsvdd.yml new file mode 100644 index 0000000000000000000000000000000000000000..619b9032b1c61ec4bc583c5dced4956446ebdb6d --- /dev/null +++ b/OpenOOD/configs/networks/dsvdd.yml @@ -0,0 +1,7 @@ +network: + name: dsvdd + type: cifar10_LeNet + num_classes: '@{dataset.num_classes}' + num_gpus: 1 + pretrained: True + checkpoint: './results/cifar10_dcae_dcae/AE_best_epoch1_roc_auc0.4976.pth' diff --git a/OpenOOD/configs/networks/godin_net.yml b/OpenOOD/configs/networks/godin_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..83d3bd57cab951d388b70150bdd218ef97410a63 --- /dev/null +++ b/OpenOOD/configs/networks/godin_net.yml @@ -0,0 +1,17 @@ +network: + name: godin_net + num_classes: 
'@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + similarity_measure: 'cosine' # value in ['cosine', 'inner', 'euclid'] + + # network used for feature extraction + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + num_gpus: '@{num_gpus}' + checkpoint: none diff --git a/OpenOOD/configs/networks/lenet.yml b/OpenOOD/configs/networks/lenet.yml new file mode 100644 index 0000000000000000000000000000000000000000..3c714f9c0c75e1c829018a563f9f55db767bd898 --- /dev/null +++ b/OpenOOD/configs/networks/lenet.yml @@ -0,0 +1,6 @@ +network: + name: lenet + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/mcd_net.yml b/OpenOOD/configs/networks/mcd_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..68688f1cda695fe3182f6ee3223d40983bb43977 --- /dev/null +++ b/OpenOOD/configs/networks/mcd_net.yml @@ -0,0 +1,14 @@ +network: + name: mcd + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: '' + num_gpus: '@{num_gpus}' + + backbone: + name: lenet + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: '' + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/mos.yml b/OpenOOD/configs/networks/mos.yml new file mode 100644 index 0000000000000000000000000000000000000000..8b40a171e90b6a4065fefe0958ce0cf450cdf035 --- /dev/null +++ b/OpenOOD/configs/networks/mos.yml @@ -0,0 +1,14 @@ +network: + name: bit + num_classes: '@{dataset.num_classes}' + model: BiT-S-R101x1 + num_block_open: 0 + bit_pretrained_dir: bit_pretrained_models + num_logits: 120 # total number of classes plus num_group + pretrained: True # set 'True' to load pretrained model + normal_load: True # True: load a checkpoint trained in this framework; False: use BiT's own load_from + # set normal_load to False to load a pretrained model downloaded from the BiT GitHub repo, + # and to True to load a checkpoint trained within this framework + checkpoint: ./results/cifar100_double_label_resnet18_32x32_mos_e100_lr0.003/model_epoch100.ckpt + # checkpoint: ./bit_pretrained_models/BiT-S-R101x1.npz # download from https://github.com/google-research/big_transfer + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/npos_net.yml b/OpenOOD/configs/networks/npos_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..62d709b91930acebcc75aaccd52335f0c9722eed --- /dev/null +++ b/OpenOOD/configs/networks/npos_net.yml @@ -0,0 +1,17 @@ +network: + name: npos_net + num_classes: '@{dataset.num_classes}' + pretrained: False # In training pipeline:"False"; In testing pipeline:"True" + num_gpus: '@{num_gpus}' + checkpoint: none + + feat_dim: 128 + head: mlp + + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/opengan.yml b/OpenOOD/configs/networks/opengan.yml new file mode 100644 index 0000000000000000000000000000000000000000..62da2fa79eda7c16ed71210a685343af2790631c --- /dev/null +++ b/OpenOOD/configs/networks/opengan.yml @@ -0,0 +1,25 @@ +network: + 
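The network configs in this stretch share a convention: pretrained toggles checkpoint restoration, and checkpoint is ignored (often the literal string none) when pretrained is False. A hedged sketch of that convention, using a torchvision ResNet-50 as a stand-in backbone; OpenOOD's actual network builders and checkpoint formats may differ:

```python
import torch
from torchvision.models import resnet50

def build_backbone(cfg):
    """Instantiate a backbone and optionally restore a checkpoint,
    following the pretrained/checkpoint convention of the configs above."""
    net = resnet50(num_classes=cfg['num_classes'])
    ckpt = str(cfg.get('checkpoint', 'none'))
    # 'checkpoint: none' (or an empty string) means: do not restore weights
    if cfg.get('pretrained') and ckpt.lower() not in ('none', ''):
        state = torch.load(ckpt, map_location='cpu')
        net.load_state_dict(state)
    return net

# e.g. build_backbone({'num_classes': 1000, 'pretrained': True,
#                      'checkpoint': './checkpoints/resnet50-0676ba61.pth'})
```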
name: opengan + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + # Number of channels of the GAN input; OpenGAN runs on backbone features (hence 512), not 3-channel images + nc: 512 + # Size of z latent vector (i.e. size of generator input) + nz: 100 + # Size of feature maps in generator + ngf: 64 + # Size of feature maps in discriminator + ndf: 64 + + # network used for feature extraction + backbone: + name: resnet50 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/openmax.yml b/OpenOOD/configs/networks/openmax.yml new file mode 100644 index 0000000000000000000000000000000000000000..c654f64d98cb3eee57dbf5a759a2cfac09747d50 --- /dev/null +++ b/OpenOOD/configs/networks/openmax.yml @@ -0,0 +1,14 @@ +network: + name: openmax_network + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: ./results/mvtec_openmax_network_OpenMax_e100_lr0.1/best.ckpt # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + backbone: + name: lenet + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: '' + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/patchcore_net.yml b/OpenOOD/configs/networks/patchcore_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..d098ea8569e2d47055b15ab8f63c3c5c89b804c4 --- /dev/null +++ b/OpenOOD/configs/networks/patchcore_net.yml @@ -0,0 +1,16 @@ +network: + name: patchcore_net + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + load_cached_faiss: True + + # network used for feature extraction + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True + checkpoint: 'results/checkpoints/cifar10_res18_acc94.30.ckpt' + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/rd4ad_net.yml b/OpenOOD/configs/networks/rd4ad_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..1af8c7522df8de438bcd9b22929d1d021db54169 --- /dev/null +++ b/OpenOOD/configs/networks/rd4ad_net.yml @@ -0,0 +1,15 @@ +network: + name: rd4ad_net + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + backbone: + name: resnet18_256x256 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True # set 'True' to load pretrained model + checkpoint: 'results/resnet18_rd4ad_teacher/resnet18-f37072fd.pth' # ignore if pretrained is false + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/react_net.yml b/OpenOOD/configs/networks/react_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..36742bf9912f04fe24f2c8daad073daa96caa8c3 --- /dev/null +++ b/OpenOOD/configs/networks/react_net.yml @@ -0,0 +1,17 @@ +network: + name: react_net + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + similarity_measure: 'cosine' # value in ['cosine', 'inner', 'euclid'] + + # network used for feature extraction + backbone: + name: 
resnet50 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True + checkpoint: 'results/checkpoints/imagenet_res50_acc76.10.pth' + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/repvgg.yml b/OpenOOD/configs/networks/repvgg.yml new file mode 100644 index 0000000000000000000000000000000000000000..cc2ef3b3b00ca64e7fb98dbec1bf5cca6dd14fb8 --- /dev/null +++ b/OpenOOD/configs/networks/repvgg.yml @@ -0,0 +1,12 @@ +network: + name: repvgg_b3 + model: repvgg_b3 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True # set 'True' to load pretrained model + checkpoint: timm_load # ignore if pretrained is false + num_gpus: '@{num_gpus}' +dataset: + image_size: 224 +ood_dataset: + image_size: 224 diff --git a/OpenOOD/configs/networks/resnet18_224x224.yml b/OpenOOD/configs/networks/resnet18_224x224.yml new file mode 100644 index 0000000000000000000000000000000000000000..911cff7de8144dc305ab1b8e745f8e7c3485ab03 --- /dev/null +++ b/OpenOOD/configs/networks/resnet18_224x224.yml @@ -0,0 +1,6 @@ +network: + name: resnet18_224x224 + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/resnet18_32x32.yml b/OpenOOD/configs/networks/resnet18_32x32.yml new file mode 100644 index 0000000000000000000000000000000000000000..5d400b47bdfe4982c9832075589806d87f042cc8 --- /dev/null +++ b/OpenOOD/configs/networks/resnet18_32x32.yml @@ -0,0 +1,6 @@ +network: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: ./results/cifar10_double_label_resnet18_32x32_mos_e100_lr0.003/best.ckpt # ignore if pretrained is false + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/resnet18_64x64.yml b/OpenOOD/configs/networks/resnet18_64x64.yml new file mode 100644 index 0000000000000000000000000000000000000000..c0ce68872d118e9d82f0cb6d4a86d3e8b2edbb08 --- /dev/null +++ b/OpenOOD/configs/networks/resnet18_64x64.yml @@ -0,0 +1,6 @@ +network: + name: resnet18_64x64 + num_classes: '@{dataset.num_classes}' + pretrained: False # set 'True' to load pretrained model + checkpoint: ./results/cifar10_resnet18_32x32_base_e200_lr_0.1/best.ckpt # ignore if pretrained is false + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/resnet50.yml b/OpenOOD/configs/networks/resnet50.yml new file mode 100644 index 0000000000000000000000000000000000000000..67bbb8f9b44db29628093cb0f8b2f11bfacdb349 --- /dev/null +++ b/OpenOOD/configs/networks/resnet50.yml @@ -0,0 +1,9 @@ +network: + name: resnet50 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True # set 'True' to load pretrained model + + # # default pretrained model: https://download.pytorch.org/models/resnet50-0676ba61.pth + # checkpoint: ./checkpoints/resnet50-0676ba61.pth # ignore if pretrained is false + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/rot_net.yml b/OpenOOD/configs/networks/rot_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..d7dcfde17cd62790aceb53dc6affb60ceb699a51 --- /dev/null +++ b/OpenOOD/configs/networks/rot_net.yml @@ -0,0 +1,13 @@ +network: + name: rot_net + num_classes: '@{dataset.num_classes}' + pretrained: False # In training pipeline:"False"; In testing pipeline:"True" + num_gpus: '@{num_gpus}' + checkpoint: none + backbone: + name: 
resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/rts_net.yml b/OpenOOD/configs/networks/rts_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..25d935fa6d037dfd17a3b5a37afad05bbb1c38c9 --- /dev/null +++ b/OpenOOD/configs/networks/rts_net.yml @@ -0,0 +1,17 @@ +network: + name: rts_net + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none # ignore if pretrained is false + num_gpus: '@{num_gpus}' + dof: 32 + kl_scale: 0.1 + + # network used for feature extraction + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/simclr.yml b/OpenOOD/configs/networks/simclr.yml new file mode 100644 index 0000000000000000000000000000000000000000..3bd5ef73606dca758f38d56f354ba91816c0bd36 --- /dev/null +++ b/OpenOOD/configs/networks/simclr.yml @@ -0,0 +1,16 @@ +network: + name: simclr_net + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True + checkpoint: 'results/checkpoints/SSD/last_new.pth' # ignore if pretrained is false + num_gpus: '@{num_gpus}' + + # network used for feature extraction + backbone: + name: resnet50 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: none + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/train_mos.yml b/OpenOOD/configs/networks/train_mos.yml new file mode 100644 index 0000000000000000000000000000000000000000..9449cc0013907b09372c4a61a240abc6f2fbdd23 --- /dev/null +++ b/OpenOOD/configs/networks/train_mos.yml @@ -0,0 +1,14 @@ +network: + name: bit + num_classes: '@{dataset.num_classes}' + model: BiT-S-R101x1 + num_block_open: 0 + bit_pretrained_dir: bit_pretrained_models + num_logits: 120 # total number of classes plus num_group + pretrained: True # set 'True' to load pretrained model + normal_load: False # True: load a checkpoint trained in this framework; False: use BiT's own load_from + # set normal_load to False to load a pretrained model downloaded from the BiT GitHub repo, + # and to True to load a checkpoint trained within this framework + # checkpoint: ./results/cifar100_double_label_bit_mos_e100_lr0.003/mos_epoch_latest.ckpt + checkpoint: ./bit_pretrained_models/BiT-S-R101x1.npz # download from https://github.com/google-research/big_transfer + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/udg_net.yml b/OpenOOD/configs/networks/udg_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..008be006465e7e9b0a07aaaf5f55e05789db0777 --- /dev/null +++ b/OpenOOD/configs/networks/udg_net.yml @@ -0,0 +1,15 @@ +network: + name: udg + num_classes: '@{dataset.num_classes}' + num_clusters: 1000 + pretrained: False # set 'True' to load pretrained model + checkpoint: '' + num_gpus: '@{num_gpus}' + + backbone: + name: resnet18_32x32 + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: '' + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/networks/vit.yml b/OpenOOD/configs/networks/vit.yml new file mode 100644 index 0000000000000000000000000000000000000000..4a61c640d6688902879d27a6723853e388d82ac7 --- /dev/null +++ b/OpenOOD/configs/networks/vit.yml @@ -0,0 +1,12 @@ 
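Like bit.yml and repvgg.yml above, the vit config that follows appends top-level dataset/ood_dataset blocks so the network file can override image_size when the configs are merged. A small deep-merge sketch illustrating the assumed precedence (network config over dataset config); OpenOOD's real merge logic may differ in detail:

```python
def deep_merge(base, override):
    """Return base updated with override, recursing into nested dicts."""
    merged = dict(base)
    for key, val in override.items():
        if isinstance(val, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], val)
        else:
            merged[key] = val
    return merged

dataset_cfg = {'dataset': {'name': 'imagenet', 'image_size': 224}}
network_cfg = {'dataset': {'image_size': 384}}  # as in the vit config below
print(deep_merge(dataset_cfg, network_cfg))
# -> {'dataset': {'name': 'imagenet', 'image_size': 384}}
```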
+network: + name: vit + model: openood/networks/vit-base-p16-384.py + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: True # set 'True' to load pretrained model + checkpoint: ./checkpoints/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth # ignore if pretrained is false + num_gpus: '@{num_gpus}' +dataset: + image_size: 384 +ood_dataset: + image_size: 384 diff --git a/OpenOOD/configs/networks/vos_net.yml b/OpenOOD/configs/networks/vos_net.yml new file mode 100644 index 0000000000000000000000000000000000000000..12654ac55cda46b92fc3a3ce48356edd79d36089 --- /dev/null +++ b/OpenOOD/configs/networks/vos_net.yml @@ -0,0 +1,16 @@ +network: + name: vos + num_classes: '@{dataset.num_classes}' + pretrained: False # False in the training pipeline; True in the testing pipeline + num_gpus: '@{num_gpus}' + num_layers: 40 + widen_factor: 2 + droprate: 0.3 + + backbone: # backbone used when the main network does not provide feature_list + name: lenet + num_classes: '@{dataset.num_classes}' + image_size: '@{dataset.image_size}' + pretrained: False + checkpoint: None + num_gpus: '@{num_gpus}' diff --git a/OpenOOD/configs/pipelines/test/feat_extract.yml b/OpenOOD/configs/pipelines/test/feat_extract.yml new file mode 100644 index 0000000000000000000000000000000000000000..b39d3a7ecc487e8bfd92a84a10ee9f2d151eec38 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/feat_extract.yml @@ -0,0 +1,20 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +mark: default + +num_gpus: 1 +num_workers: 4 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + +pipeline: + name: feat_extract + extract_target: test + +evaluator: + name: base diff --git a/OpenOOD/configs/pipelines/test/test_acc.yml b/OpenOOD/configs/pipelines/test/test_acc.yml new file mode 100644 index 0000000000000000000000000000000000000000..df858b26a94f65ca535bfc019ab1e3a0281c564f --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_acc.yml @@ -0,0 +1,18 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'" +output_dir: ./results/ +save_output: False +merge_option: merge # disabled if 'save_output' is False; choices: [default, pass, merge] + +num_gpus: 1 +num_workers: 4 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + +pipeline: + name: test_acc + +evaluator: + name: base diff --git a/OpenOOD/configs/pipelines/test/test_arpl.yml b/OpenOOD/configs/pipelines/test/test_arpl.yml new file mode 100644 index 0000000000000000000000000000000000000000..a13d34829035f84f450e52679079ec4003563db7 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_arpl.yml @@ -0,0 +1,27 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'_'@{mark}'/s'@{seed}'/'@{evaluator.ood_scheme}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +mark: default # to mark the version of experiment +seed: 0 + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + checkpoint: ["results/imagenet200_arpl_net_arpl_e90_lr0.1/s0/best_NetF.ckpt", + "results/imagenet200_arpl_net_arpl_e90_lr0.1/s0/best_criterion.ckpt"] + +pipeline: + name: test_ood + +evaluator: + name: arpl + ood_scheme: ood # [ood, fsood] + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_arplgan.yml
b/OpenOOD/configs/pipelines/test/test_arplgan.yml new file mode 100644 index 0000000000000000000000000000000000000000..8c9cc0d5ea92158b2023b0c86c02976f71955b6a --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_arplgan.yml @@ -0,0 +1,27 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'_'@{mark}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +mark: default # to mark the version of experiment + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + checkpoint: ["./results/mnist_arpl_gan_arpl_gan_e100_lr0.1/best_NetF.ckpt", + "./results/mnist_arpl_gan_arpl_gan_e100_lr0.1/best_criterion.ckpt", + null, + null] + +pipeline: + name: test_ood + +evaluator: + name: arpl + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_cutpaste.yml b/OpenOOD/configs/pipelines/test/test_cutpaste.yml new file mode 100644 index 0000000000000000000000000000000000000000..f4b43188b55c8719f8acc5eb54dbba93ad054ea0 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_cutpaste.yml @@ -0,0 +1,23 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'" +output_dir: ./results/ +save_output: True +merge_option: merge # disabled if 'save_output' is False choices: [default, pass, merge] + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + checkpoint: results/bottle_projectionNet_cutpaste_e100_lr0.03/best_epoch15_auroc97.48015873015873.ckpt + +pipeline: + name: test_ad + +evaluator: + name: ood + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_draem.yml b/OpenOOD/configs/pipelines/test/test_draem.yml new file mode 100644 index 0000000000000000000000000000000000000000..39e04e839ce40b7191a9004011afe0f0ede3c844 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_draem.yml @@ -0,0 +1,25 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'" +output_dir: ./results/ +save_output: True +merge_option: merge # disabled if 'save_output' is False choices: [default, pass, merge] + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + checkpoint: ["results/osr_mnist6_seed1_draem_train_e100_lr0.0001/draem_test_0.0001_100_bs32_osr_mnist6_seed1_best_epoch1_loss0.5001.ckpt", + "results/osr_mnist6_seed1_draem_train_e100_lr0.0001/draem_test_0.0001_100_bs32_osr_mnist6_seed1_best_epoch1_loss0.5001_seg.ckpt"] + # ignore if pretrained is false + +pipeline: + name: test_ad + +evaluator: + name: ood + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_dsvdd.yml b/OpenOOD/configs/pipelines/test/test_dsvdd.yml new file mode 100644 index 0000000000000000000000000000000000000000..6a48bcc98c3817fde987874284183bd8ebb03b75 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_dsvdd.yml @@ -0,0 +1,29 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'_'@{mark}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +mark: default # to mark the version of experiment + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + + +R: 0 +c: None +objective: one-class + +network: + pretrained: True + +pipeline: + name: test_ad + +evaluator: + name: ood + use_react: False + +recorder: + save_scores: True + save_csv: True diff 
--git a/OpenOOD/configs/pipelines/test/test_fsood.yml b/OpenOOD/configs/pipelines/test/test_fsood.yml new file mode 100644 index 0000000000000000000000000000000000000000..c6a78037af25881f4de3e987c4a2974541b442a3 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_fsood.yml @@ -0,0 +1,23 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'_'@{mark}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +mark: default # to mark the version of experiment + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + +pipeline: + name: test_ood + +evaluator: + name: fsood + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_kdad.yml b/OpenOOD/configs/pipelines/test/test_kdad.yml new file mode 100644 index 0000000000000000000000000000000000000000..8b54cdb348a5a0d775d94e59793e09e9f357e79a --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_kdad.yml @@ -0,0 +1,21 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer_name}'" +output_dir: ./results/ +save_output: False +merge_option: merge # disabled if 'save_output' is False; choices: [default, pass, merge] +normal_class: 3 # keep as a literal int; '@{...}' interpolation may turn it into a str +lamda: 0.01 + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +trainer_name: kdad +dataset_name: '@{dataset.name}' +direction_loss_only: False +last_checkpoint: 201 +metrics: roc_auc +pipeline: + name: test_ad +evaluator: + name: kdad diff --git a/OpenOOD/configs/pipelines/test/test_mos.yml b/OpenOOD/configs/pipelines/test/test_mos.yml new file mode 100644 index 0000000000000000000000000000000000000000..3c83ec0bfc445a7c2b050b3a15482f71151a2b67 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_mos.yml @@ -0,0 +1,36 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'/s'@{seed}'/'@{evaluator.ood_scheme}'" +output_dir: ./results/ +save_output: True +merge_option: default +seed: 0 + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: test_ood + +trainer: + name: mos + # group_config: ./data/group_config/cifar100_group_config.txt + group_config: Auto # if not set to a file path, the grouping is re-computed automatically + # group_config is a list giving the number of classes in each super-class + # note that the automatically computed grouping may not match the grouping + # used during training, which results in errors + +evaluator: + name: mos + ood_scheme: ood # [ood, fsood] + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.003 + +recorder: + name: base + save_scores: True + save_csv: True + save_all_models: True diff --git a/OpenOOD/configs/pipelines/test/test_ood.yml b/OpenOOD/configs/pipelines/test/test_ood.yml new file mode 100644 index 0000000000000000000000000000000000000000..73d3b63eaeb616f3288c5fa1b59fcd041433118a --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_ood.yml @@ -0,0 +1,25 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'_'@{mark}'/s'@{seed}'/'@{evaluator.ood_scheme}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +mark: default # to mark the version of experiment +seed: 0 + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + +pipeline: + name: test_ood + +evaluator: + name: ood + ood_scheme: ood +
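+# evaluator.ood_scheme selects the protocol: 'ood' is the standard benchmark, while +# 'fsood' (full-spectrum OOD) additionally treats covariate-shifted ID data as in-distribution; +# a typical invocation (a sketch only; the exact file list depends on the benchmark) merges this +# pipeline config with dataset, network, preprocessor, and postprocessor configs, e.g. +# python main.py --config configs/datasets/cifar10/cifar10.yml configs/datasets/cifar10/cifar10_ood.yml +# configs/networks/resnet18_32x32.yml configs/pipelines/test/test_ood.yml +# configs/preprocessors/base_preprocessor.yml configs/postprocessors/msp.yml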
+recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_ood_aps.yml b/OpenOOD/configs/pipelines/test/test_ood_aps.yml new file mode 100644 index 0000000000000000000000000000000000000000..09cac1b15d1c5343656927d0d8e03d4b92ca5cad --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_ood_aps.yml @@ -0,0 +1,23 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'_'@{mark}'" +output_dir: ./results/ +save_output: True +force_merge: False # disabled if 'save_output' is False +mark: default # to mark the version of experiment + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + +pipeline: + name: test_ood_aps + +evaluator: + name: ood + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_opengan.yml b/OpenOOD/configs/pipelines/test/test_opengan.yml new file mode 100644 index 0000000000000000000000000000000000000000..826f3dc74d9f1ae6f526c3d08affc604e074053b --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_opengan.yml @@ -0,0 +1,34 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'_'@{mark}'/s'@{seed}'/'@{evaluator.ood_scheme}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False choices: [default, pass, merge] +mark: default # to mark the version of experiment +seed: 0 + +num_gpus: 1 +num_workers: 4 +num_machines: 1 +machine_rank: 0 + +network: + # checkpoint setting: first load generator then discriminator + pretrained: True + checkpoint: ["/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_GNet.ckpt", + "/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_DNet.ckpt", + null] + + # load checkpoint for feature extraction network + backbone: + pretrained: True + checkpoint: "./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt" + +pipeline: + name: test_ood + +evaluator: + name: ood + ood_scheme: ood + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_osr.yml b/OpenOOD/configs/pipelines/test/test_osr.yml new file mode 100644 index 0000000000000000000000000000000000000000..1139dbfa3a2ca45667046a5d134d0dcc6e497af4 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_osr.yml @@ -0,0 +1,23 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'_'@{mark}'" +output_dir: ./results/ +save_output: True +merge_option: merge # disabled if 'save_output' is False choices: [default, pass, merge] +mark: default # to mark the version of experiment + +num_gpus: 1 +num_workers: 4 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + +pipeline: + name: test_ood + +evaluator: + name: osr + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_patchcore.yml b/OpenOOD/configs/pipelines/test/test_patchcore.yml new file mode 100644 index 0000000000000000000000000000000000000000..a31431829034fcbab89a9f8b09b1b1231276eb7b --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_patchcore.yml @@ -0,0 +1,23 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{evaluator.name}'_'@{postprocessor.name}'" +output_dir: ./results/ +save_output: True +merge_option: default # 
disabled if 'save_output' is False + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: False + +pipeline: + name: test_ad + +evaluator: + name: ad + test_pix: True + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/test/test_rd4ad.yml b/OpenOOD/configs/pipelines/test/test_rd4ad.yml new file mode 100644 index 0000000000000000000000000000000000000000..27a56b2000a6332dd5f34cd4a24e032085b71b08 --- /dev/null +++ b/OpenOOD/configs/pipelines/test/test_rd4ad.yml @@ -0,0 +1,25 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'" +output_dir: ./results/ +save_output: True +merge_option: merge # disabled if 'save_output' is False choices: [default, pass, merge] + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + checkpoint: ["results/cifar10_rd4ad_net_rd4ad_e200_lr0.005_default/bn_best.ckpt", + "results/cifar10_rd4ad_net_rd4ad_e200_lr0.005_default/decoder_best.ckpt"] + # ignore if pretrained is false + +pipeline: + name: test_ad + +evaluator: + name: ood + +recorder: + save_scores: True + save_csv: True diff --git a/OpenOOD/configs/pipelines/train/baseline.yml b/OpenOOD/configs/pipelines/train/baseline.yml new file mode 100644 index 0000000000000000000000000000000000000000..6c9df555644f415268c64e9e4b2b80a779bf6731 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/baseline.yml @@ -0,0 +1,37 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +mark: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +preprocessor: + name: base + +network: + pretrained: False + +pipeline: + name: train + +trainer: + name: base + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_arpl.yml b/OpenOOD/configs/pipelines/train/train_arpl.yml new file mode 100644 index 0000000000000000000000000000000000000000..91dfa8ed4c57135f0ef45255760ab7a23032e252 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_arpl.yml @@ -0,0 +1,30 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +seed: 0 + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train + +trainer: + name: arpl + +evaluator: + name: arpl + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0001 + +recorder: + name: arpl + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_arpl_gan.yml b/OpenOOD/configs/pipelines/train/train_arpl_gan.yml new file mode 100644 index 0000000000000000000000000000000000000000..44869d314b147b67b588357281883243cd599dd1 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_arpl_gan.yml @@ -0,0 +1,34 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'" +output_dir: ./results/ +save_output: True +merge_option: default + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +loss: + beta: 0.1 + +pipeline: + name: train_arplgan + +trainer: + name: arpl_gan + auxiliary: arpl + +evaluator: + name: arpl + +optimizer: + name: sgd + num_epochs: 100 + lr: 
0.1 + gan_lr: 0.0002 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: arpl + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_augmix.yml b/OpenOOD/configs/pipelines/train/train_augmix.yml new file mode 100644 index 0000000000000000000000000000000000000000..7f917990633daa63d24b3702631382fc676f311f --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_augmix.yml @@ -0,0 +1,34 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +mark: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train + +trainer: + name: augmix + trainer_args: + jsd: True + lam: 12 + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_cider.yml b/OpenOOD/configs/pipelines/train/train_cider.yml new file mode 100644 index 0000000000000000000000000000000000000000..83a9cede6eb36cefdf077f9ca3d65fffebe90997 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_cider.yml @@ -0,0 +1,42 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_protom'@{trainer.trainer_args.proto_m}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +mark: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +preprocessor: + name: base + +pipeline: + name: train + +trainer: + name: cider + trainer_args: + proto_m: 0.95 + temp: 0.1 + w: 2 + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.5 + momentum: 0.9 + weight_decay: 0.0001 + warm: True + cosine: True + lr_decay_rate: 0.1 + lr_decay_epochs: [50, 75, 90] + +recorder: + name: cider + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_conf_branch.yml b/OpenOOD/configs/pipelines/train/train_conf_branch.yml new file mode 100644 index 0000000000000000000000000000000000000000..b7609ac42d2922cae122c9fa07855f095cf7f461 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_conf_branch.yml @@ -0,0 +1,37 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +num_classes: '@{dataset.num_classes}' +mark: default + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 +seed: 0 + +baseline: False + +pipeline: + name: train + +trainer: + name: conf_branch + budget: 0.3 + lmbda: 0.1 + eps: 1.0e-12 + +evaluator: + name: base + +optimizer: + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + nesterov: True + weight_decay: 5.0e-4 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_csi.yml b/OpenOOD/configs/pipelines/train/train_csi.yml new file mode 100644 index 0000000000000000000000000000000000000000..a1b43d38e1d7fa4d68c71b005a4d3f89c79ce18c --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_csi.yml @@ -0,0 +1,37 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{mode}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +pipeline: + 
name: train + +trainer: + name: csi + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 # step 1 700 epochs, step 2 100 epochs + lr: 0.1 + momentum: 0.9 + weight_decay: 0.000001 + warmup: 10 # warm-up epochs + +recorder: + name: base + save_all_models: False + +mode: csi_step2 # csi_step1, csi_step2 +sim_lambda: 1.0 # Weight for SimCLR loss +temperature: 0.07 # Temperature for similarity +resize_factor: 0.08 # resize scale is sampled from [resize_factor, 1.0] +resize_fix: False # resize scale is fixed to resize_factor (not [resize_factor, 1.0]) diff --git a/OpenOOD/configs/pipelines/train/train_cutmix.yml b/OpenOOD/configs/pipelines/train/train_cutmix.yml new file mode 100644 index 0000000000000000000000000000000000000000..27b857d41f06be0c41ed3e5b91979130b00dcbcb --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_cutmix.yml @@ -0,0 +1,33 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'" +output_dir: ./results/ +save_output: True +merge_option: default + +mark: default +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train + +trainer: + name: cutmix + trainer_args: + beta: 1.0 + cutmix_prob: 1.0 # cutmix probability + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_cutpaste.yml b/OpenOOD/configs/pipelines/train/train_cutpaste.yml new file mode 100644 index 0000000000000000000000000000000000000000..42695c7c4d7e5321e3390c34c18f89654cee9451 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_cutpaste.yml @@ -0,0 +1,30 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'" +output_dir: ./results/ +save_output: True +merge_option: default + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train_ad + +trainer: + name: cutpaste + +evaluator: + name: ad + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.03 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: ad + save_all_models: False + save_csv: False diff --git a/OpenOOD/configs/pipelines/train/train_dcae.yml b/OpenOOD/configs/pipelines/train/train_dcae.yml new file mode 100644 index 0000000000000000000000000000000000000000..ba1f0fe75f52af7e56e773e1dd303a6762ddafc3 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_dcae.yml @@ -0,0 +1,30 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'" +save_output: True +merge_option: default +normal_class: 3 +output_dir: ./results/ +lr: 0.0001 + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +lr_milestones: [50] +weight_decay: 0.5e-6 + +R: 0 +c: None + +pipeline: + name: train_ad +evaluator: + name: dcae +trainer: + name: dcae +recorder: + name: dcae + save_all_models: False +optimizer: + name: adam + num_epochs: 150 diff --git a/OpenOOD/configs/pipelines/train/train_draem.yml b/OpenOOD/configs/pipelines/train/train_draem.yml new file mode 100644 index 0000000000000000000000000000000000000000..55a89113ac44d395e89d7cf0860c4f0c6ca3ad2b --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_draem.yml @@ -0,0 +1,30 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_train_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'" +output_dir: ./results/ +save_output: True +merge_option: merge + +num_gpus: 1 +num_workers: 0 +num_machines: 
1 +machine_rank: 0 + +pipeline: + name: train_ad + +trainer: + name: draem + +evaluator: + name: ad + +optimizer: + name: MultiStep + num_epochs: 700 + steps: [0.8, 0.9] + lr: 0.0001 + gamma: 0.2 + +recorder: + name: ad + best_model_basis: image_auroc + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_dropout.yml b/OpenOOD/configs/pipelines/train/train_dropout.yml new file mode 100644 index 0000000000000000000000000000000000000000..2c49f0e29d979cfe5a9c2d599b915cfa4dc9dedf --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_dropout.yml @@ -0,0 +1,28 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_dropout" +output_dir: ./results/ +save_output: True +merge_option: default + +num_gpus: 1 +num_workers: 0 + +pipeline: + name: train + +trainer: + name: dropout + dropout_p: 0.5 + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_dsvdd.yml b/OpenOOD/configs/pipelines/train/train_dsvdd.yml new file mode 100644 index 0000000000000000000000000000000000000000..d3e843e268886c60839ba455a9f24c611613f13f --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_dsvdd.yml @@ -0,0 +1,34 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'" +save_output: True +merge_option: default +normal_class: 3 +output_dir: ./results/ +lr: 0.0001 + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +lr_milestones: [50] +weight_decay: 0.5e-6 +warm_up_n_epochs: 10 + +R: 0 +c: None + +pipeline: + name: train_ad +evaluator: + name: ad + +trainer: + name: dsvdd + +recorder: + name: ad + save_all_models: False + +optimizer: + name: adam + num_epochs: 150 diff --git a/OpenOOD/configs/pipelines/train/train_ece.yml b/OpenOOD/configs/pipelines/train/train_ece.yml new file mode 100644 index 0000000000000000000000000000000000000000..595535eaa7b39166c164ab224848048fda2bbc3d --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_ece.yml @@ -0,0 +1,32 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'" +output_dir: ./results/ +save_output: True +merge_option: default + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +preprocessor: + name: base + +pipeline: + name: train + +trainer: + name: base + +evaluator: + name: ece + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_ish.yml b/OpenOOD/configs/pipelines/train/train_ish.yml new file mode 100644 index 0000000000000000000000000000000000000000..5e4d49689daa4a0ab528c8a8296914ffdcf35ea8 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_ish.yml @@ -0,0 +1,44 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_param'@{trainer.trainer_args.param}'_bs_'@{dataset.train.batch_size}'/s'@{seed}'" + +output_dir: ./results/ +save_output: True +merge_option: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +preprocessor: + name: base + +pipeline: + name: train + + + +trainer: + name: ish + trainer_args: + mode: minksample_expscale + param: 0.85 + layer: r1 + + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 
0.9 + weight_decay: 0.0005 + weight_decay_fc: 0.00005 + nesterov: True + nesterov_fc: True + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_kdad.yml b/OpenOOD/configs/pipelines/train/train_kdad.yml new file mode 100644 index 0000000000000000000000000000000000000000..b232108729f3fa086149a7b5cfd42af8c99579e5 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_kdad.yml @@ -0,0 +1,29 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +normal_class: 3 +lamda: 0.01 +dataset_name: '@{dataset.name}' +direction_loss_only: False +learning_rate: 1e-3 +metrics: roc_auc +last_checkpoint: 201 +continue_train: False + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train_ad +evaluator: + name: kdad +trainer: + name: kdad +recorder: + name: kdad + save_all_models: False +optimizer: + num_epochs: 201 diff --git a/OpenOOD/configs/pipelines/train/train_logitnorm.yml b/OpenOOD/configs/pipelines/train/train_logitnorm.yml new file mode 100644 index 0000000000000000000000000000000000000000..bc299da138016315e72398e3e689f60750cbde0d --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_logitnorm.yml @@ -0,0 +1,36 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_alpha'@{trainer.trainer_args.tau}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +mark: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +preprocessor: + name: base + +pipeline: + name: train + +trainer: + name: logitnorm + trainer_args: + tau: 0.04 + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_mcd.yml b/OpenOOD/configs/pipelines/train/train_mcd.yml new file mode 100644 index 0000000000000000000000000000000000000000..66a6c256614a6277c77949963560141a2f78bcf6 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_mcd.yml @@ -0,0 +1,8 @@ +pipeline: + name: train_oe + +trainer: + name: mcd + lambda_oe: 1 + margin: 1.2 + start_epoch_ft: 90 diff --git a/OpenOOD/configs/pipelines/train/train_mixoe.yml b/OpenOOD/configs/pipelines/train/train_mixoe.yml new file mode 100644 index 0000000000000000000000000000000000000000..2c2886366801dd0f4f1cdcaf761b10899ecbd99c --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_mixoe.yml @@ -0,0 +1,11 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_alpha'@{trainer.alpha}'_beta'@{trainer.beta}'_'@{trainer.mix_op}'_lam'@{trainer.lambda_oe}'_'@{mark}'/s'@{seed}'" + +pipeline: + name: train_oe + +trainer: + name: mixoe + lambda_oe: 1.0 + alpha: 0.1 + beta: 1.0 + mix_op: cutmix diff --git a/OpenOOD/configs/pipelines/train/train_mixup.yml b/OpenOOD/configs/pipelines/train/train_mixup.yml new file mode 100644 index 0000000000000000000000000000000000000000..fa6f24112f08ba2f685e65228f338e6325973ef0 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_mixup.yml @@ -0,0 +1,35 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'\ +_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'\ +_alpha'@{trainer.trainer_args.alpha}'_'@{mark}'" +output_dir: ./results/ +save_output: True +merge_option: default +mark: default + + 
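+# the trailing backslashes in exp_name above are line continuations inside a YAML +# double-quoted scalar: the escaped line breaks are dropped and the pieces are joined, +# so the interpolated experiment name remains a single string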
+num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train + +trainer: + name: mixup + trainer_args: + alpha: 0.2 + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_mos.yml b/OpenOOD/configs/pipelines/train/train_mos.yml new file mode 100644 index 0000000000000000000000000000000000000000..21c6cfa4abd1e835a9194f4add0dce632c33e034 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_mos.yml @@ -0,0 +1,33 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train + +trainer: + name: mos + # group_config: ./data/group_config/cifar100_group_config.txt + group_config: Auto # if not set to a file path, the grouping is re-computed automatically + # group_config is a list giving the number of classes in each super-class + # note that the automatically computed grouping may not match the grouping + # used during training, which results in errors + +evaluator: + name: mos + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.003 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_npos.yml b/OpenOOD/configs/pipelines/train/train_npos.yml new file mode 100644 index 0000000000000000000000000000000000000000..1494cc133cd0ac2ea5fcdac39586367b0707c5fb --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_npos.yml @@ -0,0 +1,53 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +mark: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +preprocessor: + name: base + +pipeline: + name: train + +trainer: + name: npos + trainer_args: + proto_m: 0.95 + temp: 0.1 + sample_number: 1000 + sample_from: 600 + start_epoch_KNN: 40 + K: 300 + cov_mat: 0.1 + select: 200 + ID_points_num: 200 + pick_nums: 2 + w_disp: 0.5 + w_comp: 1 + loss_weight: 0.1 + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.5 + momentum: 0.9 + weight_decay: 0.0001 + warm: True + cosine: True + lr_decay_rate: 0.1 + lr_decay_epochs: [30, 50, 120] + mlp_decay_rate: 0.1 + +recorder: + name: cider + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_oe.yml b/OpenOOD/configs/pipelines/train/train_oe.yml new file mode 100644 index 0000000000000000000000000000000000000000..6b785c2834f18a17985986971949165f526ed8bc --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_oe.yml @@ -0,0 +1,8 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_lam'@{trainer.lambda_oe}'_'@{mark}'/s'@{seed}'" + +pipeline: + name: train_oe + +trainer: + name: oe + lambda_oe: 0.5 diff --git a/OpenOOD/configs/pipelines/train/train_opengan.yml b/OpenOOD/configs/pipelines/train/train_opengan.yml new file mode 100644 index 0000000000000000000000000000000000000000..4f241ba5550e9e521c540f4475174c5a94fca182 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_opengan.yml @@ -0,0 +1,34 @@ +exp_name:
"'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +mark: default +seed: 0 + +dataset: + # cached features extracted from classifier + feat_root: './results/cifar10_resnet18_32x32_feat_extract_opengan_default/s0' + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train_opengan + +trainer: + name: opengan + +evaluator: + name: ood + +optimizer: + name: Adam + num_epochs: 100 + lr: 0.0001 + beta1: 0.5 + +recorder: + name: opengan + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_opengan_feat_extract.yml b/OpenOOD/configs/pipelines/train/train_opengan_feat_extract.yml new file mode 100644 index 0000000000000000000000000000000000000000..71c340156b2323eb303a56215d021261b0c484d7 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_opengan_feat_extract.yml @@ -0,0 +1,20 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'_'@{mark}'/s'@{seed}'" +# output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +mark: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +network: + pretrained: True + +pipeline: + name: feat_extract_opengan + +evaluator: + name: base diff --git a/OpenOOD/configs/pipelines/train/train_rd4ad.yml b/OpenOOD/configs/pipelines/train/train_rd4ad.yml new file mode 100644 index 0000000000000000000000000000000000000000..ec8acaa0aa7df41419d76e6370cd64e6f67d3a59 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_rd4ad.yml @@ -0,0 +1,33 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'" +output_dir: ./results/ +save_output: True +force_merge: False +merge_option: merge +mark: default + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +preprocessor: + name: base + +pipeline: + name: train_ad + +trainer: + name: rd4ad + +evaluator: + name: ad + +optimizer: + name: adam + num_epochs: 200 + lr: 0.005 + betas: [0.5,0.999] + +recorder: + name: rd4ad + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_regmixup.yml b/OpenOOD/configs/pipelines/train/train_regmixup.yml new file mode 100644 index 0000000000000000000000000000000000000000..14b27325b7642b56397d3e19a0120eec565ea1bf --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_regmixup.yml @@ -0,0 +1,35 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'\ +_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'\ +_alpha'@{trainer.trainer_args.alpha}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default +mark: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: train + +trainer: + name: regmixup + trainer_args: + alpha: 20 + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_sem.yml b/OpenOOD/configs/pipelines/train/train_sem.yml new file mode 100644 index 0000000000000000000000000000000000000000..b9b2a42ecbb37b994890d3bea042241fe913eef5 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_sem.yml @@ -0,0 +1,39 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'\ +_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'" +output_dir: ./results/ 
+save_output: True +merge_option: default + +num_gpus: 1 +num_workers: 0 +num_machines: 1 +machine_rank: 0 + +pipeline: + name: finetune + +network: + pretrained: True + +trainer: + name: sae + trainer_args: + num_clusters: 3 + feature_type: stat # flat/mean/stat + reduce_dim: pca_50 # none/capca_10/pca_50 + loss_weight: [0.5, 0.5, 0.1, 0.1] # [cls_std, cls_mix, sae_id, sae_ood] + alpha: 0.5 + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 30 + lr: 0.05 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_t2fnorm.yml b/OpenOOD/configs/pipelines/train/train_t2fnorm.yml new file mode 100644 index 0000000000000000000000000000000000000000..9c06c71d47e93e91632f6ec32cc538e4d01e23b4 --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_t2fnorm.yml @@ -0,0 +1,40 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_tau'@{network.tau}'_'@{mark}'" +output_dir: ./results/ +save_output: True +force_merge: False +merge_option: default +mark: T2FNorm +seed: 42 + +num_gpus: 1 +num_workers: 4 +num_machines: 1 +machine_rank: 0 + +preprocessor: + name: base + +network: + pretrained: False + modification: t2fnorm + tau: 0.1 + +pipeline: + name: train + +trainer: + name: base + +evaluator: + name: base + +optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + +recorder: + name: base + save_all_models: False diff --git a/OpenOOD/configs/pipelines/train/train_udg.yml b/OpenOOD/configs/pipelines/train/train_udg.yml new file mode 100644 index 0000000000000000000000000000000000000000..d74b3aac018459605ac8398b0ccabd6c3de5cf4d --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_udg.yml @@ -0,0 +1,12 @@ +pipeline: + name: train_oe + +trainer: + name: udg + num_clusters: 1000 + pca_dim: 256 + purity_ind_thresh: 0.8 + purity_ood_thresh: 0.8 + oe_enhance_ratio: 2.0 + lambda_oe: 0.5 + lambda_aux: 0.1 diff --git a/OpenOOD/configs/pipelines/train/train_vos.yml b/OpenOOD/configs/pipelines/train/train_vos.yml new file mode 100644 index 0000000000000000000000000000000000000000..b67cddf91d94eb390ac844151a1bd7fe663aa7fa --- /dev/null +++ b/OpenOOD/configs/pipelines/train/train_vos.yml @@ -0,0 +1,43 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'/s'@{seed}'" +output_dir: ./results/ +save_output: True +merge_option: default # disabled if 'save_output' is False +num_classes: '@{dataset.num_classes}' +mark: default +seed: 0 + +num_gpus: 1 +num_workers: 8 +num_machines: 1 +machine_rank: 0 + + +sample_number: 1000 +sample_from: 10000 +select: 1 +feature_dim: 512 #resnet 512, lenet 120 + +pipeline: + name: train + +trainer: + name: vos + loss_weight: 0.1 + +evaluator: + name: base + +optimizer: + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 5.0e-4 + +recorder: + name: base + save_all_models: False + +preprocessor: + name: base + +start_epoch: 0 diff --git a/OpenOOD/configs/postprocessors/BronzeNet2.yml b/OpenOOD/configs/postprocessors/BronzeNet2.yml new file mode 100644 index 0000000000000000000000000000000000000000..abfa85c5415d7b16b7885b0403c8ed3bbf09aa0d --- /dev/null +++ b/OpenOOD/configs/postprocessors/BronzeNet2.yml @@ -0,0 +1,7 @@ +postprocessor: + name: bronzenet2 + APS_mode: False + postprocessor_args: + K: 50 + postprocessor_sweep: + K_list: [50, 100, 200, 500, 1000] diff --git 
a/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_0.yml b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_0.yml new file mode 100644 index 0000000000000000000000000000000000000000..dec8cefc2d74b11f1cb69330ae0459bd1a30a09b --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_0.yml @@ -0,0 +1,15 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_'@{postprocessor.name}'\ +_'@{postprocessor.postprocessor_args.num_clusters_list}'\ +_'@{postprocessor.postprocessor_args.feature_type_list}'\ +_'@{postprocessor.postprocessor_args.alpha_list}'\ +_'@{postprocessor.postprocessor_args.reduce_dim_list}'\ +_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 1, 1, 10] + feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat + alpha_list: [-0.0001, 0, 0, 0, 1] + reduce_dim_list: [pca_10, none, none, none, pca_10] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_1.yml b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_1.yml new file mode 100644 index 0000000000000000000000000000000000000000..c275d37939fe535a03c31d6252ceefbe2616b4db --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_1.yml @@ -0,0 +1,15 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_'@{postprocessor.name}'\ +_'@{postprocessor.postprocessor_args.num_clusters_list}'\ +_'@{postprocessor.postprocessor_args.feature_type_list}'\ +_'@{postprocessor.postprocessor_args.alpha_list}'\ +_'@{postprocessor.postprocessor_args.reduce_dim_list}'\ +_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 1, 1, 10] + feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat + alpha_list: [-0.0001, 0, 0, 0, 1] + reduce_dim_list: [pca_50, none, none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_2.yml b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_2.yml new file mode 100644 index 0000000000000000000000000000000000000000..c275d37939fe535a03c31d6252ceefbe2616b4db --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_2.yml @@ -0,0 +1,15 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_'@{postprocessor.name}'\ +_'@{postprocessor.postprocessor_args.num_clusters_list}'\ +_'@{postprocessor.postprocessor_args.feature_type_list}'\ +_'@{postprocessor.postprocessor_args.alpha_list}'\ +_'@{postprocessor.postprocessor_args.reduce_dim_list}'\ +_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 1, 1, 10] + feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat + alpha_list: [-0.0001, 0, 0, 0, 1] + reduce_dim_list: [pca_50, none, none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_3.yml b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_3.yml new file mode 100644 index 0000000000000000000000000000000000000000..c275d37939fe535a03c31d6252ceefbe2616b4db --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_3.yml @@ -0,0 +1,15 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_'@{postprocessor.name}'\ +_'@{postprocessor.postprocessor_args.num_clusters_list}'\ +_'@{postprocessor.postprocessor_args.feature_type_list}'\ +_'@{postprocessor.postprocessor_args.alpha_list}'\ +_'@{postprocessor.postprocessor_args.reduce_dim_list}'\ 
+_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 1, 1, 10] + feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat + alpha_list: [-0.0001, 0, 0, 0, 1] + reduce_dim_list: [pca_50, none, none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_4.yml b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_4.yml new file mode 100644 index 0000000000000000000000000000000000000000..c275d37939fe535a03c31d6252ceefbe2616b4db --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/cifar_gmm_4.yml @@ -0,0 +1,15 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_'@{postprocessor.name}'\ +_'@{postprocessor.postprocessor_args.num_clusters_list}'\ +_'@{postprocessor.postprocessor_args.feature_type_list}'\ +_'@{postprocessor.postprocessor_args.alpha_list}'\ +_'@{postprocessor.postprocessor_args.reduce_dim_list}'\ +_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 1, 1, 10] + feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat + alpha_list: [-0.0001, 0, 0, 0, 1] + reduce_dim_list: [pca_50, none, none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/covid_gmm_0.yml b/OpenOOD/configs/postprocessors/_gmm_iter/covid_gmm_0.yml new file mode 100644 index 0000000000000000000000000000000000000000..faec8e10dc4cdfa1fcda69d4911b0116d4b116e0 --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/covid_gmm_0.yml @@ -0,0 +1,15 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_'@{postprocessor.name}'\ +_'@{postprocessor.postprocessor_args.num_clusters_list}'\ +_'@{postprocessor.postprocessor_args.feature_type_list}'\ +_'@{postprocessor.postprocessor_args.alpha_list}'\ +_'@{postprocessor.postprocessor_args.reduce_dim_list}'\ +_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [1, 1, 1, 1, 5] + feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat + alpha_list: [-0.001, 0, 0, 0, 1] + reduce_dim_list: [pca_10, pca_10, pca_10, pca_10, pca_10] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_0.yml b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_0.yml new file mode 100644 index 0000000000000000000000000000000000000000..019f79f8aaeb77f00d4344cc07aded5d2c9c7156 --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_0.yml @@ -0,0 +1,10 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_gmm0_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 10] + feature_type_list: [flat, mean, flat] # flat/mean/stat + alpha_list: [1, 0, 0] + reduce_dim_list: [pca_50, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_1.yml b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_1.yml new file mode 100644 index 0000000000000000000000000000000000000000..a721f73b99d577ef228065a875bdd7ac3139332f --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_1.yml @@ -0,0 +1,10 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_gmm1_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 10] + feature_type_list: [flat, mean, flat] # flat/mean/stat + alpha_list: [-0.001, 0, 1] + reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50 diff 
--git a/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_2.yml b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_2.yml new file mode 100644 index 0000000000000000000000000000000000000000..1c9b0bce6908cb772e2853193db1163b8947e256 --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_2.yml @@ -0,0 +1,10 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_gmm2_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 10] + feature_type_list: [flat, mean, flat] # flat/mean/stat + alpha_list: [-0.01, 0, 1] + reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_3.yml b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_3.yml new file mode 100644 index 0000000000000000000000000000000000000000..2e1072b677baa89ef7d574ce771875cc6dd27bb7 --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_3.yml @@ -0,0 +1,10 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_gmm3_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 10] + feature_type_list: [stat, mean, flat] # flat/mean/stat + alpha_list: [-0.01, 0, 1] + reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_4.yml b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_4.yml new file mode 100644 index 0000000000000000000000000000000000000000..39c74220bd136970afa9afadd49bed30e541051c --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_4.yml @@ -0,0 +1,10 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_gmm4_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 10] + feature_type_list: [stat, mean, flat] # flat/mean/stat + alpha_list: [-0.01, 0, 1] + reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_5.yml b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_5.yml new file mode 100644 index 0000000000000000000000000000000000000000..42d7075aaa19307894cbd53ef01c662224035d96 --- /dev/null +++ b/OpenOOD/configs/postprocessors/_gmm_iter/mnist_gmm_5.yml @@ -0,0 +1,10 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_gmm5_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 10] + feature_type_list: [stat, mean, flat] # flat/mean/stat + alpha_list: [-0.01, 0, 1] + reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/ash.yml b/OpenOOD/configs/postprocessors/ash.yml new file mode 100644 index 0000000000000000000000000000000000000000..3b1e7fb4a24d29c3f1bec722b3b651715cb44c82 --- /dev/null +++ b/OpenOOD/configs/postprocessors/ash.yml @@ -0,0 +1,7 @@ +postprocessor: + name: ash + APS_mode: True + postprocessor_args: + percentile: 90 + postprocessor_sweep: + percentile_list: [65, 70, 75, 80, 85, 90, 95] diff --git a/OpenOOD/configs/postprocessors/cider.yml b/OpenOOD/configs/postprocessors/cider.yml new file mode 100644 index 0000000000000000000000000000000000000000..2f86e7d6800a26a3d7b9003d2b2081b7fdee7762 --- /dev/null +++ b/OpenOOD/configs/postprocessors/cider.yml @@ -0,0 +1,7 @@ +postprocessor: + name: cider + APS_mode: True + postprocessor_args: + K: 50 + postprocessor_sweep: + K_list: [50, 100, 200, 500, 1000] diff --git
a/OpenOOD/configs/postprocessors/conf_branch.yml b/OpenOOD/configs/postprocessors/conf_branch.yml new file mode 100644 index 0000000000000000000000000000000000000000..e393628f10617644d74ac306f7cbdcf73be627a5 --- /dev/null +++ b/OpenOOD/configs/postprocessors/conf_branch.yml @@ -0,0 +1,3 @@ +postprocessor: + name: conf_branch + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/cutpaste.yml b/OpenOOD/configs/postprocessors/cutpaste.yml new file mode 100644 index 0000000000000000000000000000000000000000..e62f6534599293fc8ec56c63f51114264808e765 --- /dev/null +++ b/OpenOOD/configs/postprocessors/cutpaste.yml @@ -0,0 +1,2 @@ +postprocessor: + name: cutpaste diff --git a/OpenOOD/configs/postprocessors/dice.yml b/OpenOOD/configs/postprocessors/dice.yml new file mode 100644 index 0000000000000000000000000000000000000000..198e978aadc028ecd8c523f427d6e8a999687316 --- /dev/null +++ b/OpenOOD/configs/postprocessors/dice.yml @@ -0,0 +1,7 @@ +postprocessor: + name: dice + APS_mode: False + postprocessor_args: + p: 90 + postprocessor_sweep: + p_list: [90] diff --git a/OpenOOD/configs/postprocessors/draem.yml b/OpenOOD/configs/postprocessors/draem.yml new file mode 100644 index 0000000000000000000000000000000000000000..abec05867d95584436fa9532d11c17ce19dfff75 --- /dev/null +++ b/OpenOOD/configs/postprocessors/draem.yml @@ -0,0 +1,2 @@ +postprocessor: + name: draem diff --git a/OpenOOD/configs/postprocessors/dropout.yml b/OpenOOD/configs/postprocessors/dropout.yml new file mode 100644 index 0000000000000000000000000000000000000000..4a51c06df4862f4548a96fe2d91636c51a34eef7 --- /dev/null +++ b/OpenOOD/configs/postprocessors/dropout.yml @@ -0,0 +1,6 @@ +postprocessor: + name: dropout + APS_mode: False + postprocessor_args: + dropout_p: 0.5 + dropout_times: 5 diff --git a/OpenOOD/configs/postprocessors/dsvdd.yml b/OpenOOD/configs/postprocessors/dsvdd.yml new file mode 100644 index 0000000000000000000000000000000000000000..178092c9e623577000a9a96e1626a9b339588f39 --- /dev/null +++ b/OpenOOD/configs/postprocessors/dsvdd.yml @@ -0,0 +1,2 @@ +postprocessor: + name: dsvdd diff --git a/OpenOOD/configs/postprocessors/ebo.yml b/OpenOOD/configs/postprocessors/ebo.yml new file mode 100644 index 0000000000000000000000000000000000000000..a84b9a639180f97673f135038c31c18b49c6c762 --- /dev/null +++ b/OpenOOD/configs/postprocessors/ebo.yml @@ -0,0 +1,7 @@ +postprocessor: + name: ebo + APS_mode: True + postprocessor_args: + temperature: 1 + postprocessor_sweep: + temperature_list: [1] diff --git a/OpenOOD/configs/postprocessors/ensemble.yml b/OpenOOD/configs/postprocessors/ensemble.yml new file mode 100644 index 0000000000000000000000000000000000000000..c96edef1b3feae3532a16cd0c0617dea1bc8e086 --- /dev/null +++ b/OpenOOD/configs/postprocessors/ensemble.yml @@ -0,0 +1,7 @@ +postprocessor: + name: ensemble + postprocessor_args: + network_name: lenet + checkpoint_root: ./results/lenet_ensemble_pretrained + checkpoints: [net1, net2, net3, net4, net5] + num_networks: 5 # number of networks to ensemble diff --git a/OpenOOD/configs/postprocessors/gen.yml b/OpenOOD/configs/postprocessors/gen.yml new file mode 100644 index 0000000000000000000000000000000000000000..c2ef7af979e9ebdbe257e8f3abc665241dc7f5d2 --- /dev/null +++ b/OpenOOD/configs/postprocessors/gen.yml @@ -0,0 +1,9 @@ +postprocessor: + name: gen + APS_mode: True + postprocessor_args: + gamma: 0.1 + M: 100 + postprocessor_sweep: + gamma_list: [0.01,0.1,0.5,1,2,5,10] + M_list: [10,50,100,200,500,1000] diff --git a/OpenOOD/configs/postprocessors/gmm.yml
b/OpenOOD/configs/postprocessors/gmm.yml new file mode 100644 index 0000000000000000000000000000000000000000..f3a06e8ba4b6ee7f8a2246b00f9f0b151c079448 --- /dev/null +++ b/OpenOOD/configs/postprocessors/gmm.yml @@ -0,0 +1,14 @@ +exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\ +_'@{evaluator.name}'_'@{postprocessor.name}'\ +_'@{postprocessor.postprocessor_args.num_clusters_list}'\ +_'@{postprocessor.postprocessor_args.alpha_list}'\ +_'@{postprocessor.postprocessor_args.reduce_dim_list}'\ +_'@{mark}'" + +postprocessor: + name: gmm + postprocessor_args: + num_clusters_list: [3, 1, 50] + feature_type_list: [stat, mean, flat] # flat/mean/stat + alpha_list: [0, 0, 1] + reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50 diff --git a/OpenOOD/configs/postprocessors/godin.yml b/OpenOOD/configs/postprocessors/godin.yml new file mode 100644 index 0000000000000000000000000000000000000000..f0e7e78e1231532fa3bf4dbb38b19bec007ace12 --- /dev/null +++ b/OpenOOD/configs/postprocessors/godin.yml @@ -0,0 +1,6 @@ +postprocessor: + name: godin + APS_mode: False + postprocessor_args: + score_func: h # use h or g + noise_magnitude: 0.0025 # in range of [0, 0.0025, 0.005, 0.01, 0.02, 0.04, 0.08] diff --git a/OpenOOD/configs/postprocessors/gradnorm.yml b/OpenOOD/configs/postprocessors/gradnorm.yml new file mode 100644 index 0000000000000000000000000000000000000000..7270e08f469459d28ad5538cef645ab0ec04fcc1 --- /dev/null +++ b/OpenOOD/configs/postprocessors/gradnorm.yml @@ -0,0 +1,4 @@ +postprocessor: + name: gradnorm + APS_mode: False + postprocessor_args: diff --git a/OpenOOD/configs/postprocessors/gram.yml b/OpenOOD/configs/postprocessors/gram.yml new file mode 100644 index 0000000000000000000000000000000000000000..f7fb7337ccc490d63f699fa6269a9bda536cac10 --- /dev/null +++ b/OpenOOD/configs/postprocessors/gram.yml @@ -0,0 +1,7 @@ +postprocessor: + name: gram + APS_mode: True + postprocessor_args: + powers: [1,2,3,4,5] + postprocessor_sweep: + powers_list: [[1,2,3,4,5]] diff --git a/OpenOOD/configs/postprocessors/kdad.yml b/OpenOOD/configs/postprocessors/kdad.yml new file mode 100644 index 0000000000000000000000000000000000000000..03410074e0a088178ef1edfc1ffb8d71bef46eb1 --- /dev/null +++ b/OpenOOD/configs/postprocessors/kdad.yml @@ -0,0 +1,2 @@ +postprocessor: + name: msp diff --git a/OpenOOD/configs/postprocessors/klm.yml b/OpenOOD/configs/postprocessors/klm.yml new file mode 100644 index 0000000000000000000000000000000000000000..961cec750a1de9ef3f15068a073b551824ea35e8 --- /dev/null +++ b/OpenOOD/configs/postprocessors/klm.yml @@ -0,0 +1,3 @@ +postprocessor: + name: klm + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/knn.yml b/OpenOOD/configs/postprocessors/knn.yml new file mode 100644 index 0000000000000000000000000000000000000000..a9684c101f526fa42ea6e96b24f6f32c9f0b4371 --- /dev/null +++ b/OpenOOD/configs/postprocessors/knn.yml @@ -0,0 +1,8 @@ +postprocessor: + name: knn + APS_mode: True + postprocessor_args: + K: 50 + postprocessor_sweep: + K_list: [2, 4, 6, 8, 10, 11] + # K_list: [50, 100, 200, 500, 1000] diff --git a/OpenOOD/configs/postprocessors/mcd.yml b/OpenOOD/configs/postprocessors/mcd.yml new file mode 100644 index 0000000000000000000000000000000000000000..077dc88a9c3d80eee3cd38dd4594bc205a87c965 --- /dev/null +++ b/OpenOOD/configs/postprocessors/mcd.yml @@ -0,0 +1,3 @@ +postprocessor: + name: mcd + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/mcm.yml b/OpenOOD/configs/postprocessors/mcm.yml new file mode 100644 index 
--- /dev/null +++ b/OpenOOD/configs/postprocessors/mcm.yml @@ -0,0 +1,7 @@ +postprocessor: + name: mcm + APS_mode: False + postprocessor_args: + tau: 1 + postprocessor_sweep: + tau_list: [0.01, 1, 10] diff --git a/OpenOOD/configs/postprocessors/mds.yml b/OpenOOD/configs/postprocessors/mds.yml new file mode 100644 index 0000000000000000000000000000000000000000..d913206fe99035bc6e88c2b9de965f6fb76aa3f5 --- /dev/null +++ b/OpenOOD/configs/postprocessors/mds.yml @@ -0,0 +1,3 @@ +postprocessor: + name: mds + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/mds_ensemble.yml b/OpenOOD/configs/postprocessors/mds_ensemble.yml new file mode 100644 index 0000000000000000000000000000000000000000..3d8f72a19f27a6233bbdb38954b086bad619ef33 --- /dev/null +++ b/OpenOOD/configs/postprocessors/mds_ensemble.yml @@ -0,0 +1,10 @@ +postprocessor: + name: mds_ensemble + APS_mode: True + postprocessor_args: + noise: 0.0014 + feature_type_list: [mean] # flat/mean/stat + alpha_list: [1] + reduce_dim_list: [none] # none/capca/pca_50/lda + postprocessor_sweep: + noise_list: [0.0014] diff --git a/OpenOOD/configs/postprocessors/mls.yml b/OpenOOD/configs/postprocessors/mls.yml new file mode 100644 index 0000000000000000000000000000000000000000..1182df42b69d2d42242bcf9c320056c8b72b41cf --- /dev/null +++ b/OpenOOD/configs/postprocessors/mls.yml @@ -0,0 +1,4 @@ +postprocessor: + name: mls + APS_mode: False + postprocessor_args: diff --git a/OpenOOD/configs/postprocessors/mos.yml b/OpenOOD/configs/postprocessors/mos.yml new file mode 100644 index 0000000000000000000000000000000000000000..6dcb8230d7342d154142c159ffb361fe2d1bd12c --- /dev/null +++ b/OpenOOD/configs/postprocessors/mos.yml @@ -0,0 +1,4 @@ +postprocessor: + name: mos + postprocessor_args: + coreset_sampling_ratio: 0.01 diff --git a/OpenOOD/configs/postprocessors/msp.yml b/OpenOOD/configs/postprocessors/msp.yml new file mode 100644 index 0000000000000000000000000000000000000000..0753f79d8cc327db52798e8698257f0df63afeb2 --- /dev/null +++ b/OpenOOD/configs/postprocessors/msp.yml @@ -0,0 +1,3 @@ +postprocessor: + name: msp + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/nnguide.yml b/OpenOOD/configs/postprocessors/nnguide.yml new file mode 100644 index 0000000000000000000000000000000000000000..5a6b68af9647cca927d7aa92e34e9eb687dfc3dd --- /dev/null +++ b/OpenOOD/configs/postprocessors/nnguide.yml @@ -0,0 +1,9 @@ +postprocessor: + name: nnguide + APS_mode: False + postprocessor_args: + alpha: 0.01 + K: 100 + postprocessor_sweep: + K_list: [100] + alpha_list: [0.01] diff --git a/OpenOOD/configs/postprocessors/npos.yml b/OpenOOD/configs/postprocessors/npos.yml new file mode 100644 index 0000000000000000000000000000000000000000..d6ebfc2e77b412c21bc1f624d8c1097c60b0c40d --- /dev/null +++ b/OpenOOD/configs/postprocessors/npos.yml @@ -0,0 +1,7 @@ +postprocessor: + name: npos + APS_mode: True + postprocessor_args: + K: 50 + postprocessor_sweep: + K_list: [50, 100, 200, 500, 1000] diff --git a/OpenOOD/configs/postprocessors/odin.yml b/OpenOOD/configs/postprocessors/odin.yml new file mode 100644 index 0000000000000000000000000000000000000000..3f192fbc8bb7d18712377cb8d592bac7c86823a3 --- /dev/null +++ b/OpenOOD/configs/postprocessors/odin.yml @@ -0,0 +1,9 @@ +postprocessor: + name: odin + APS_mode: True + postprocessor_args: + temperature: 1000 + noise: 0.0014 + postprocessor_sweep: + temperature_list: [1, 10, 100, 1000] + noise_list: [0.0014, 0.0028]
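odin.yml pairs temperature scaling with a small adversarial-style input perturbation. A minimal sketch of that score, assuming `net` maps an image batch to logits (illustrative only, not the repo's odin postprocessor):

import torch
import torch.nn.functional as F

def odin_score(net, x, temperature=1000.0, noise=0.0014):
    x = x.clone().requires_grad_(True)
    logits = net(x)
    # gradient of the temperature-scaled NLL w.r.t. the input
    loss = F.cross_entropy(logits / temperature, logits.argmax(dim=1))
    loss.backward()
    # nudge the input toward higher softmax confidence
    x_pert = x - noise * x.grad.sign()
    with torch.no_grad():
        probs = torch.softmax(net(x_pert) / temperature, dim=1)
    return probs.max(dim=1).values

diff --git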
a/OpenOOD/configs/postprocessors/opengan.yml b/OpenOOD/configs/postprocessors/opengan.yml new file mode 100644 index 0000000000000000000000000000000000000000..ed8080378d3bda036facdf43f33dab9a6eaf40b1 --- /dev/null +++ b/OpenOOD/configs/postprocessors/opengan.yml @@ -0,0 +1,3 @@ +postprocessor: + name: opengan + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/openmax.yml b/OpenOOD/configs/postprocessors/openmax.yml new file mode 100644 index 0000000000000000000000000000000000000000..235a4144020b759a0e138b9327da1d7a075c5874 --- /dev/null +++ b/OpenOOD/configs/postprocessors/openmax.yml @@ -0,0 +1,12 @@ +postprocessor: + name: openmax + APS_mode: False + postprocessor_args: + coreset_sampling_ratio: 0.01 + n_neighbors: 9 + phase: test + category: test + save_src_code: True + save_anomaly_map: True + noise: + feature_type_list: diff --git a/OpenOOD/configs/postprocessors/patch.yml b/OpenOOD/configs/postprocessors/patch.yml new file mode 100644 index 0000000000000000000000000000000000000000..0664dac2ce27024aad53fd9abf83a1cac93ce3ab --- /dev/null +++ b/OpenOOD/configs/postprocessors/patch.yml @@ -0,0 +1,11 @@ +postprocessor: + name: patchcore + postprocessor_args: + coreset_sampling_ratio: 0.01 + n_neighbors: 9 + phase: test + category: hazelnut + save_src_code: True + save_anomaly_map: True + noise: + feature_type_list: diff --git a/OpenOOD/configs/postprocessors/rankfeat.yml b/OpenOOD/configs/postprocessors/rankfeat.yml new file mode 100644 index 0000000000000000000000000000000000000000..89f2f3b8623f7ee09314a7ae0960217a81d78cbe --- /dev/null +++ b/OpenOOD/configs/postprocessors/rankfeat.yml @@ -0,0 +1,6 @@ +postprocessor: + name: rankfeat + APS_mode: False + postprocessor_args: + accelerate: False + temperature: 1 diff --git a/OpenOOD/configs/postprocessors/rd4ad.yml b/OpenOOD/configs/postprocessors/rd4ad.yml new file mode 100644 index 0000000000000000000000000000000000000000..45f43ea075b3fa864ece20a72650145aab7f7062 --- /dev/null +++ b/OpenOOD/configs/postprocessors/rd4ad.yml @@ -0,0 +1,3 @@ +postprocessor: + name: rd4ad + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/react.yml b/OpenOOD/configs/postprocessors/react.yml new file mode 100644 index 0000000000000000000000000000000000000000..1e63a8a4d8edc41e916d23a21090df73f3b28945 --- /dev/null +++ b/OpenOOD/configs/postprocessors/react.yml @@ -0,0 +1,7 @@ +postprocessor: + name: react + APS_mode: True + postprocessor_args: + percentile: 90 + postprocessor_sweep: + percentile_list: [85, 90, 95, 99] diff --git a/OpenOOD/configs/postprocessors/relation.yml b/OpenOOD/configs/postprocessors/relation.yml new file mode 100644 index 0000000000000000000000000000000000000000..3456c8a945125c725981014c21b057c6c5cb12fb --- /dev/null +++ b/OpenOOD/configs/postprocessors/relation.yml @@ -0,0 +1,7 @@ +postprocessor: + name: relation + APS_mode: True + postprocessor_args: + pow: 1 + postprocessor_sweep: + pow_list: [1, 2, 4, 6, 8] diff --git a/OpenOOD/configs/postprocessors/residual.yml b/OpenOOD/configs/postprocessors/residual.yml new file mode 100644 index 0000000000000000000000000000000000000000..36343601c214e27462860839092fb5aefa08c535 --- /dev/null +++ b/OpenOOD/configs/postprocessors/residual.yml @@ -0,0 +1,4 @@ +postprocessor: + name: residual + postprocessor_args: + dim: 512 diff --git a/OpenOOD/configs/postprocessors/rmds.yml b/OpenOOD/configs/postprocessors/rmds.yml new file mode 100644 index 0000000000000000000000000000000000000000..858f6089ad2f22201ea367bfa83cb76f7e2c7cd3 --- /dev/null +++ 
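react.yml above exposes ReAct's single hyperparameter: the percentile from which the activation-clipping threshold is derived on ID data. A sketch of the scoring step, assuming `threshold` was precomputed as that percentile of ID penultimate activations (names here are illustrative, not repo code):

import torch

def react_energy(feature, fc_weight, fc_bias, threshold):
    clipped = feature.clamp(max=threshold)      # rectify extreme activations
    logits = clipped @ fc_weight.t() + fc_bias  # re-apply the classifier head
    return torch.logsumexp(logits, dim=1)       # energy score on clipped logits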
b/OpenOOD/configs/postprocessors/rmds.yml @@ -0,0 +1,3 @@ +postprocessor: + name: rmds + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/rot.yml b/OpenOOD/configs/postprocessors/rot.yml new file mode 100644 index 0000000000000000000000000000000000000000..4339f4a06464d680dc1b1f7a48a1873fea134e59 --- /dev/null +++ b/OpenOOD/configs/postprocessors/rot.yml @@ -0,0 +1,3 @@ +postprocessor: + name: rot + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/rotpred.yml b/OpenOOD/configs/postprocessors/rotpred.yml new file mode 100644 index 0000000000000000000000000000000000000000..4339f4a06464d680dc1b1f7a48a1873fea134e59 --- /dev/null +++ b/OpenOOD/configs/postprocessors/rotpred.yml @@ -0,0 +1,3 @@ +postprocessor: + name: rot + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/rts.yml b/OpenOOD/configs/postprocessors/rts.yml new file mode 100644 index 0000000000000000000000000000000000000000..209c393b8d4690405963ddd720e4547d0d7bf8d3 --- /dev/null +++ b/OpenOOD/configs/postprocessors/rts.yml @@ -0,0 +1,5 @@ +postprocessor: + name: rts + APS_mode: False + postprocessor_args: + ood_score: 'var' # msp or var \ No newline at end of file diff --git a/OpenOOD/configs/postprocessors/scale.yml b/OpenOOD/configs/postprocessors/scale.yml new file mode 100644 index 0000000000000000000000000000000000000000..5027abff2b1f90ae0e12a1c13cecdc6d060b9ff6 --- /dev/null +++ b/OpenOOD/configs/postprocessors/scale.yml @@ -0,0 +1,7 @@ +postprocessor: + name: scale + APS_mode: True + postprocessor_args: + percentile: 85 + postprocessor_sweep: + percentile_list: [65, 70, 75, 80, 85, 90, 95] diff --git a/OpenOOD/configs/postprocessors/she.yml b/OpenOOD/configs/postprocessors/she.yml new file mode 100644 index 0000000000000000000000000000000000000000..3e639f943ce3ea6a7b613f91aef9fede0d01fff3 --- /dev/null +++ b/OpenOOD/configs/postprocessors/she.yml @@ -0,0 +1,5 @@ +postprocessor: + name: she + APS_mode: False + postprocessor_args: + metric: inner_product diff --git a/OpenOOD/configs/postprocessors/ssd.yml b/OpenOOD/configs/postprocessors/ssd.yml new file mode 100644 index 0000000000000000000000000000000000000000..e2b01e7b6c6fa574d1c8a84d7a470ef2c456bc3c --- /dev/null +++ b/OpenOOD/configs/postprocessors/ssd.yml @@ -0,0 +1,10 @@ +postprocessor: + name: mds + APS_mode: True + postprocessor_args: + noise: 0.0014 + feature_type_list: [mean] # flat/mean/stat + alpha_list: [1] + reduce_dim_list: [none] # none/capca/pca_50/lda + postprocessor_sweep: + noise_list: [0.0014] diff --git a/OpenOOD/configs/postprocessors/temp_scaling.yml b/OpenOOD/configs/postprocessors/temp_scaling.yml new file mode 100644 index 0000000000000000000000000000000000000000..86197a46ea00c2c947a3040f9f7e20e087464854 --- /dev/null +++ b/OpenOOD/configs/postprocessors/temp_scaling.yml @@ -0,0 +1,3 @@ +postprocessor: + name: temperature_scaling + APS_mode: False diff --git a/OpenOOD/configs/postprocessors/vim.yml b/OpenOOD/configs/postprocessors/vim.yml new file mode 100644 index 0000000000000000000000000000000000000000..6d7a8c51a6b4e1d62060d0554dd4a5b50c8d7734 --- /dev/null +++ b/OpenOOD/configs/postprocessors/vim.yml @@ -0,0 +1,7 @@ +postprocessor: + name: vim + APS_mode: True + postprocessor_args: + dim: 256 + postprocessor_sweep: + dim_list: [256, 1000] diff --git a/OpenOOD/configs/preprocessors/augmix_preprocessor.yml b/OpenOOD/configs/preprocessors/augmix_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..53d7484a619da3afefb163b94daf969d46b9fda5 --- /dev/null +++ 
b/OpenOOD/configs/preprocessors/augmix_preprocessor.yml @@ -0,0 +1,7 @@ +preprocessor: + name: augmix + severity: 1 # see torchvision docs for meaning of the args + all_ops: true + mixture_width: 3 + alpha: 1.0 + chain_depth: -1 diff --git a/OpenOOD/configs/preprocessors/base_preprocessor.yml b/OpenOOD/configs/preprocessors/base_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..c093dc2a6a32fc2ec047616a9b31900b220bd70a --- /dev/null +++ b/OpenOOD/configs/preprocessors/base_preprocessor.yml @@ -0,0 +1,2 @@ +preprocessor: + name: base diff --git a/OpenOOD/configs/preprocessors/csi_preprocessor.yml b/OpenOOD/configs/preprocessors/csi_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..2fbcf2aca5c6e871879fcb412becd845825075bf --- /dev/null +++ b/OpenOOD/configs/preprocessors/csi_preprocessor.yml @@ -0,0 +1,2 @@ +preprocessor: + name: csi diff --git a/OpenOOD/configs/preprocessors/cutout_preprocessor.yml b/OpenOOD/configs/preprocessors/cutout_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..30eba6656d2afb34176529bc8915e1d2208232c9 --- /dev/null +++ b/OpenOOD/configs/preprocessors/cutout_preprocessor.yml @@ -0,0 +1,4 @@ +preprocessor: + name: cutout + n_holes: 1 + length: 16 diff --git a/OpenOOD/configs/preprocessors/cutpaste_preprocessor.yml b/OpenOOD/configs/preprocessors/cutpaste_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..da746909efb322d24d7695684858cd13dcae945b --- /dev/null +++ b/OpenOOD/configs/preprocessors/cutpaste_preprocessor.yml @@ -0,0 +1,5 @@ +preprocessor: + name: cutpaste + preprocessor_args: + area_ratio: [0.02, 0.15] + aspect_ratio: 0.3 diff --git a/OpenOOD/configs/preprocessors/draem_preprocessor.yml b/OpenOOD/configs/preprocessors/draem_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..c89499f1d9701320a20e951fee8fbf433cbd108b --- /dev/null +++ b/OpenOOD/configs/preprocessors/draem_preprocessor.yml @@ -0,0 +1,5 @@ +preprocessor: + name: draem + preprocessor_args: + image_size: 256 + anomaly_source: ./data/images_classic/texture diff --git a/OpenOOD/configs/preprocessors/patchcore_preprocessor.yml b/OpenOOD/configs/preprocessors/patchcore_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..acb138fd43800c26d3e9370a570a01386cac3526 --- /dev/null +++ b/OpenOOD/configs/preprocessors/patchcore_preprocessor.yml @@ -0,0 +1,2 @@ +preprocessor: + name: patchcore diff --git a/OpenOOD/configs/preprocessors/pixmix_preprocessor.yml b/OpenOOD/configs/preprocessors/pixmix_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..4925fbe532561cad5206f2b93f46b2f32aaeeebe --- /dev/null +++ b/OpenOOD/configs/preprocessors/pixmix_preprocessor.yml @@ -0,0 +1,8 @@ +preprocessor: + name: pixmix + preprocessor_args: + mixing_set_dir: data/benchmark_imglist/cifar10/fractals_fvis.txt + aug_severity: 3 # severity of base augmentation operators + all_ops: true # turn on all augmentation operations (+brightness,contrast,color,sharpness + k: 4 # augment the image a random number of times with a maximum of k times (mixing iterations) + beta: 3 # severity of mixing diff --git a/OpenOOD/configs/preprocessors/randaugment_preprocessor.yml b/OpenOOD/configs/preprocessors/randaugment_preprocessor.yml new file mode 100644 index 0000000000000000000000000000000000000000..45c1c707d874d139fdd08851f2011fc9a37a8438 --- /dev/null +++ 
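cutout_preprocessor.yml configures a Cutout-style augmentation: n_holes square masks of side length zeroed out at random positions. A self-contained sketch operating on CHW tensors (not the repo's implementation):

import torch

class Cutout:
    def __init__(self, n_holes: int = 1, length: int = 16):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img: torch.Tensor) -> torch.Tensor:
        _, h, w = img.shape
        mask = torch.ones(h, w)
        for _ in range(self.n_holes):
            y = torch.randint(h, (1,)).item()
            x = torch.randint(w, (1,)).item()
            # clip the square to the image bounds and zero it out
            mask[max(0, y - self.length // 2):min(h, y + self.length // 2),
                 max(0, x - self.length // 2):min(w, x + self.length // 2)] = 0.0
        return img * mask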
b/OpenOOD/configs/preprocessors/randaugment_preprocessor.yml @@ -0,0 +1,4 @@ +preprocessor: + name: randaugment + n: 1 + m: 14 diff --git a/OpenOOD/easy_dev.ipynb b/OpenOOD/easy_dev.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..ce6637a5e73f26405274d79d207982b0a8afcc8b --- /dev/null +++ b/OpenOOD/easy_dev.ipynb @@ -0,0 +1,669 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f09246e8", + "metadata": {}, + "source": [ + "# Easy Dev for Post-hoc OOD Detectors\n", + "\n", + "This notebook integrates some simple post-hoc OOD detection methods.\n", + "\n", + "We choose CIFAR-10 as in-distribution (ID) and load a pretrained ResNet-18." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4dc297c1", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "markdown", + "id": "8b218878", + "metadata": {}, + "source": [ + "## Load Models and Dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "f155277c", + "metadata": {}, + "outputs": [], + "source": [ + "from openood.utils import config\n", + "from openood.datasets import get_dataloader, get_ood_dataloader\n", + "from openood.evaluators import get_evaluator\n", + "from openood.networks import get_network" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "48968bc2", + "metadata": {}, + "outputs": [], + "source": [ + "# load config files for cifar10 baseline\n", + "config_files = [\n", + " './configs/datasets/cifar10/cifar10.yml',\n", + " './configs/datasets/cifar10/cifar10_ood.yml',\n", + " './configs/networks/resnet18_32x32.yml',\n", + " './configs/pipelines/test/test_ood.yml',\n", + " './configs/preprocessors/base_preprocessor.yml',\n", + " './configs/postprocessors/msp.yml',\n", + "]\n", + "config = config.Config(*config_files)\n", + "# modify config \n", + "config.network.checkpoint = './results/cifar10_resnet18_32x32_base_e100_lr0.1/best.ckpt'\n", + "config.network.pretrained = True\n", + "config.num_workers = 8\n", + "config.save_output = False\n", + "config.parse_refs()" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "a99ab6de", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "dataset:\n", + " image_size: 32\n", + " name: cifar10\n", + " num_classes: 10\n", + " num_gpus: 1\n", + " num_machines: 1\n", + " num_workers: 8\n", + " pre_size: 32\n", + " split_names: ['train', 'val', 'test']\n", + " test:\n", + " batch_size: 200\n", + " data_dir: ./data/images_classic/\n", + " dataset_class: ImglistDataset\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar10.txt\n", + " interpolation: bilinear\n", + " shuffle: False\n", + " train:\n", + " batch_size: 128\n", + " data_dir: ./data/images_classic/\n", + " dataset_class: ImglistDataset\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/train_cifar10.txt\n", + " interpolation: bilinear\n", + " shuffle: True\n", + " val:\n", + " batch_size: 200\n", + " data_dir: ./data/images_classic/\n", + " dataset_class: ImglistDataset\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar10.txt\n", + " interpolation: bilinear\n", + " shuffle: False\n", + "evaluator:\n", + " name: ood\n", + "exp_name: cifar10_resnet18_32x32_test_ood_ood_msp_default\n", + "merge_option: default\n", + "machine_rank: 0\n", + "mark: default\n", + "network:\n", + " checkpoint: ./results/cifar10_resnet18_32x32_base_e100_lr0.1/best.ckpt\n", + " name: resnet18_32x32\n", + " num_classes: 10\n",
+ " num_gpus: 1\n", + " pretrained: True\n", + "num_gpus: 1\n", + "num_machines: 1\n", + "num_workers: 8\n", + "ood_dataset:\n", + " batch_size: 128\n", + " dataset_class: ImglistDataset\n", + " farood:\n", + " datasets: ['mnist', 'svhn', 'texture', 'place365']\n", + " mnist:\n", + " data_dir: ./data/images_classic/\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/test_mnist.txt\n", + " place365:\n", + " data_dir: ./data/images_classic/\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/test_places365.txt\n", + " svhn:\n", + " data_dir: ./data/images_classic/\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/test_svhn.txt\n", + " texture:\n", + " data_dir: ./data/images_classic/\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/test_texture.txt\n", + " image_size: 32\n", + " interpolation: bilinear\n", + " name: cifar10_ood\n", + " nearood:\n", + " cifar100:\n", + " data_dir: ./data/images_classic/\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar100.txt\n", + " datasets: ['cifar100', 'tin']\n", + " tin:\n", + " data_dir: ./data/images_classic/\n", + " imglist_pth: ./data/benchmark_imglist/osr_tin20/test_tin.txt\n", + " num_classes: 10\n", + " num_gpus: 1\n", + " num_machines: 1\n", + " num_workers: 8\n", + " shuffle: False\n", + " split_names: ['val', 'nearood', 'farood']\n", + " val:\n", + " data_dir: ./data/images_classic/\n", + " imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar100.txt\n", + "output_dir: ./results/\n", + "pipeline:\n", + " name: test_ood\n", + "postprocessor:\n", + " name: msp\n", + "preprocessor:\n", + " name: base\n", + "recorder:\n", + " save_csv: True\n", + " save_scores: True\n", + "save_output: False" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "config" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "35d30b38", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model Loading resnet18_32x32 Completed!\n" + ] + } + ], + "source": [ + "# get dataloader\n", + "id_loader_dict = get_dataloader(config)\n", + "ood_loader_dict = get_ood_dataloader(config)\n", + "# init network\n", + "net = get_network(config.network).cuda()\n", + "# init ood evaluator\n", + "evaluator = get_evaluator(config)" + ] + }, + { + "cell_type": "markdown", + "id": "2043b57f", + "metadata": {}, + "source": [ + "## Feature Extraction" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "f430f169", + "metadata": {}, + "outputs": [], + "source": [ + "from tqdm import tqdm\n", + "import numpy as np\n", + "import torch\n", + "import os.path as osp" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "512fe29c", + "metadata": {}, + "outputs": [], + "source": [ + "def save_arr_to_dir(arr, dir):\n", + " with open(dir, 'wb') as f:\n", + " np.save(f, arr)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "418778f7", + "metadata": {}, + "outputs": [], + "source": [ + "save_root = f'./results/{config.exp_name}'" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "e2f12809", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Extracting results...: 100%|██████████| 45/45 [00:03<00:00, 13.48it/s]\n", + "Extracting results...: 100%|██████████| 5/5 [00:00<00:00, 6.84it/s]\n" + ] + } + ], + "source": [ + "# save id (test & val) results\n", + "net.eval()\n", + "modes = ['test', 'val']\n", + "for mode in modes:\n",
+ " dl = id_loader_dict[mode]\n", + " dataiter = iter(dl)\n", + " \n", + " logits_list = []\n", + " feature_list = []\n", + " label_list = []\n", + " \n", + " for i in tqdm(range(1,\n", + " len(dataiter) + 1),\n", + " desc='Extracting results...',\n", + " position=0,\n", + " leave=True):\n", + " batch = next(dataiter)\n", + " data = batch['data'].cuda()\n", + " label = batch['label']\n", + " with torch.no_grad():\n", + " logits_cls, feature = net(data, return_feature=True)\n", + " logits_list.append(logits_cls.data.to('cpu').numpy())\n", + " feature_list.append(feature.data.to('cpu').numpy())\n", + " label_list.append(label.numpy())\n", + "\n", + " logits_arr = np.concatenate(logits_list)\n", + " feature_arr = np.concatenate(feature_list)\n", + " label_arr = np.concatenate(label_list)\n", + " \n", + " save_arr_to_dir(logits_arr, osp.join(save_root, 'id', f'{mode}_logits.npy'))\n", + " save_arr_to_dir(feature_arr, osp.join(save_root, 'id', f'{mode}_feature.npy'))\n", + " save_arr_to_dir(label_arr, osp.join(save_root, 'id', f'{mode}_labels.npy'))" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "5cded214", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Extracting results...: 100%|██████████| 71/71 [00:01<00:00, 35.57it/s]\n", + "Extracting results...: 100%|██████████| 141/141 [00:04<00:00, 32.65it/s]\n", + "Extracting results...: 100%|██████████| 547/547 [00:15<00:00, 34.35it/s]\n", + "Extracting results...: 100%|██████████| 204/204 [00:05<00:00, 34.51it/s]\n", + "Extracting results...: 100%|██████████| 45/45 [00:06<00:00, 7.47it/s]\n", + "Extracting results...: 100%|██████████| 286/286 [00:19<00:00, 14.37it/s]\n" + ] + } + ], + "source": [ + "# save ood results\n", + "net.eval()\n", + "ood_splits = ['nearood', 'farood']\n", + "for ood_split in ood_splits:\n", + " for dataset_name, ood_dl in ood_loader_dict[ood_split].items():\n", + " dataiter = iter(ood_dl)\n", + " \n", + " logits_list = []\n", + " feature_list = []\n", + " label_list = []\n", + "\n", + " for i in tqdm(range(1,\n", + " len(dataiter) + 1),\n", + " desc='Extracting results...',\n", + " position=0,\n", + " leave=True):\n", + " batch = next(dataiter)\n", + " data = batch['data'].cuda()\n", + " label = batch['label']\n", + "\n", + " with torch.no_grad():\n", + " logits_cls, feature = net(data, return_feature=True)\n", + " logits_list.append(logits_cls.data.to('cpu').numpy())\n", + " feature_list.append(feature.data.to('cpu').numpy())\n", + " label_list.append(label.numpy())\n", + "\n", + " logits_arr = np.concatenate(logits_list)\n", + " feature_arr = np.concatenate(feature_list)\n", + " label_arr = np.concatenate(label_list)\n", + "\n", + " save_arr_to_dir(logits_arr, osp.join(save_root, ood_split, f'{dataset_name}_logits.npy'))\n", + " save_arr_to_dir(feature_arr, osp.join(save_root, ood_split, f'{dataset_name}_feature.npy'))\n", + " save_arr_to_dir(label_arr, osp.join(save_root, ood_split, f'{dataset_name}_labels.npy'))" + ] + }, + { + "cell_type": "markdown", + "id": "0b69a9ed", + "metadata": {}, + "source": [ + "## MSP Evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "db3f705b", + "metadata": {}, + "outputs": [], + "source": [ + "# build msp method (pass in pre-saved logits)\n", + "def msp_postprocess(logits):\n", + " score = torch.softmax(logits, dim=1)\n", + " conf, pred = torch.max(score, dim=1)\n", + " return pred, conf" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "a1cac34c", + "metadata": {}, + "outputs": [],
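Since the logits are cached to disk above, other logit-based detectors can be evaluated without re-running the network. For instance, a max-logit (MLS) drop-in for msp_postprocess; a sketch matching the spirit of mls.yml, not repo code:

import torch

def mls_postprocess(logits):
    conf, pred = torch.max(logits, dim=1)  # maximum logit score: skip the softmax
    return pred, conf

(Equivalently, the notebook could be reloaded with './configs/postprocessors/mls.yml' in place of msp.yml.)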
+ "source": [ + "# load logits, feature, label for this benchmark\n", + "results = dict()\n", + "# for id\n", + "modes = ['val', 'test']\n", + "results['id'] = dict()\n", + "for mode in modes:\n", + " results['id'][mode] = dict()\n", + " results['id'][mode]['feature'] = np.load(osp.join(save_root, 'id', f'{mode}_feature.npy'))\n", + " results['id'][mode]['logits'] = np.load(osp.join(save_root, 'id', f'{mode}_logits.npy'))\n", + " results['id'][mode]['labels'] = np.load(osp.join(save_root, 'id', f'{mode}_labels.npy'))\n", + "\n", + "# for ood\n", + "split_types = ['nearood', 'farood']\n", + "for split_type in split_types:\n", + " results[split_type] = dict()\n", + " dataset_names = config['ood_dataset'][split_type].datasets\n", + " for dataset_name in dataset_names:\n", + " results[split_type][dataset_name] = dict()\n", + " results[split_type][dataset_name]['feature'] = np.load(osp.join(save_root, split_type, f'{dataset_name}_feature.npy'))\n", + " results[split_type][dataset_name]['logits'] = np.load(osp.join(save_root, split_type, f'{dataset_name}_logits.npy'))\n", + " results[split_type][dataset_name]['labels'] = np.load(osp.join(save_root, split_type, f'{dataset_name}_labels.npy'))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "id": "79697d8c", + "metadata": {}, + "outputs": [], + "source": [ + "def print_nested_dict(dict_obj, indent = 0):\n", + " ''' Pretty Print nested dictionary with given indent level \n", + " '''\n", + " # Iterate over all key-value pairs of dictionary\n", + " for key, value in dict_obj.items():\n", + " # If value is dict type, then print nested dict \n", + " if isinstance(value, dict):\n", + " print(' ' * indent, key, ':', '{')\n", + " print_nested_dict(value, indent + 2)\n", + " print(' ' * indent, '}')\n", + " else:\n", + " print(' ' * indent, key, ':', value.shape)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 61, + "id": "2dfe3614", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " id : {\n", + " val : {\n", + " feature : (1000, 512)\n", + " logits : (1000, 10)\n", + " labels : (1000,)\n", + " }\n", + " test : {\n", + " feature : (9000, 512)\n", + " logits : (9000, 10)\n", + " labels : (9000,)\n", + " }\n", + " }\n", + " nearood : {\n", + " cifar100 : {\n", + " feature : (9000, 512)\n", + " logits : (9000, 10)\n", + " labels : (9000,)\n", + " }\n", + " tin : {\n", + " feature : (18000, 512)\n", + " logits : (18000, 10)\n", + " labels : (18000,)\n", + " }\n", + " }\n", + " farood : {\n", + " mnist : {\n", + " feature : (70000, 512)\n", + " logits : (70000, 10)\n", + " labels : (70000,)\n", + " }\n", + " svhn : {\n", + " feature : (26032, 512)\n", + " logits : (26032, 10)\n", + " labels : (26032,)\n", + " }\n", + " texture : {\n", + " feature : (5640, 512)\n", + " logits : (5640, 10)\n", + " labels : (5640,)\n", + " }\n", + " place365 : {\n", + " feature : (36500, 512)\n", + " logits : (36500, 10)\n", + " labels : (36500,)\n", + " }\n", + " }\n" + ] + } + ], + "source": [ + "print_nested_dict(results)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "a3af6815", + "metadata": {}, + "outputs": [], + "source": [ + "# get pred, conf, gt from MSP postprocessor (can change to your custom_postprocessor here)\n", + "postprocess_results = dict()\n", + "# id\n", + "modes = ['val', 'test']\n", + "postprocess_results['id'] = dict()\n", + "for mode in modes:\n", + " pred, conf = msp_postprocess(torch.from_numpy(results['id'][mode]['logits']))\n", + " pred, conf = 
pred.numpy(), conf.numpy()\n", + " gt = results['id'][mode]['labels']\n", + " postprocess_results['id'][mode] = [pred, conf, gt]\n", + "\n", + "# ood\n", + "split_types = ['nearood', 'farood']\n", + "for split_type in split_types:\n", + " postprocess_results[split_type] = dict()\n", + " dataset_names = config['ood_dataset'][split_type].datasets\n", + " for dataset_name in dataset_names:\n", + " pred, conf = msp_postprocess(torch.from_numpy(results[split_type][dataset_name]['logits']))\n", + " pred, conf = pred.numpy(), conf.numpy()\n", + " gt = results[split_type][dataset_name]['labels']\n", + " gt = -1 * np.ones_like(gt) # hard set to -1 here\n", + " postprocess_results[split_type][dataset_name] = [pred, conf, gt]" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "41e3dac6", + "metadata": {}, + "outputs": [], + "source": [ + "def print_all_metrics(metrics):\n", + " [fpr, auroc, aupr_in, aupr_out,\n", + " ccr_4, ccr_3, ccr_2, ccr_1, accuracy] \\\n", + " = metrics\n", + " print('FPR@95: {:.2f}, AUROC: {:.2f}'.format(100 * fpr, 100 * auroc),\n", + " end=' ',\n", + " flush=True)\n", + " print('AUPR_IN: {:.2f}, AUPR_OUT: {:.2f}'.format(\n", + " 100 * aupr_in, 100 * aupr_out),\n", + " flush=True)\n", + " print('CCR: {:.2f}, {:.2f}, {:.2f}, {:.2f},'.format(\n", + " ccr_4 * 100, ccr_3 * 100, ccr_2 * 100, ccr_1 * 100),\n", + " end=' ',\n", + " flush=True)\n", + " print('ACC: {:.2f}'.format(accuracy * 100), flush=True)\n", + " print(u'\\u2500' * 70, flush=True) " + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "b16e6f5b", + "metadata": {}, + "outputs": [], + "source": [ + "from openood.evaluators.metrics import compute_all_metrics\n", + "def eval_ood(postprocess_results):\n", + " [id_pred, id_conf, id_gt] = postprocess_results['id']['test']\n", + " split_types = ['nearood', 'farood']\n", + "\n", + " for split_type in split_types:\n", + " metrics_list = []\n", + " print(f\"Performing evaluation on {split_type} datasets...\")\n", + " dataset_names = config['ood_dataset'][split_type].datasets\n", + " \n", + " for dataset_name in dataset_names:\n", + " [ood_pred, ood_conf, ood_gt] = postprocess_results[split_type][dataset_name]\n", + "\n", + " pred = np.concatenate([id_pred, ood_pred])\n", + " conf = np.concatenate([id_conf, ood_conf])\n", + " label = np.concatenate([id_gt, ood_gt])\n", + " print(f'Computing metrics on {dataset_name} dataset...')\n", + "\n", + " ood_metrics = compute_all_metrics(conf, label, pred)\n", + " print_all_metrics(ood_metrics)\n", + " metrics_list.append(ood_metrics)\n", + " print('Computing mean metrics...', flush=True)\n", + " metrics_list = np.array(metrics_list)\n", + " metrics_mean = np.mean(metrics_list, axis=0) \n", + " print_all_metrics(metrics_mean)\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "335686fe", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing evaluation on nearood datasets...\n", + "Computing metrics on cifar100 dataset...\n", + "FPR@95: 62.01, AUROC: 87.11 AUPR_IN: 85.93, AUPR_OUT: 85.25\n", + "CCR: 0.36, 1.84, 11.31, 68.38, ACC: 95.34\n", + "──────────────────────────────────────────────────────────────────────\n", + "Computing metrics on tin dataset...\n", + "FPR@95: 60.31, AUROC: 86.61 AUPR_IN: 73.79, AUPR_OUT: 91.74\n", + "CCR: 0.14, 0.88, 7.78, 67.00, ACC: 95.34\n", + "──────────────────────────────────────────────────────────────────────\n", + "Computing mean metrics...\n", + "FPR@95: 61.16, AUROC: 86.86 
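For reference, the FPR@95 numbers printed here can be recomputed from confidences alone. A sketch of the metric (`is_ood` is a boolean mask, True for OOD samples; this is not the repo's compute_all_metrics):

import numpy as np

def fpr_at_95_tpr(conf: np.ndarray, is_ood: np.ndarray) -> float:
    # threshold that keeps 95% of ID samples classified as ID...
    thresh = np.percentile(conf[~is_ood], 5)
    # ...and the fraction of OOD samples still scoring above it
    return float((conf[is_ood] >= thresh).mean())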
AUPR_IN: 79.86, AUPR_OUT: 88.50\n", + "CCR: 0.25, 1.36, 9.54, 67.69, ACC: 95.34\n", + "──────────────────────────────────────────────────────────────────────\n", + "Performing evaluation on farood datasets...\n", + "Computing metrics on mnist dataset...\n", + "FPR@95: 58.56, AUROC: 89.92 AUPR_IN: 66.95, AUPR_OUT: 98.10\n", + "CCR: 5.49, 11.13, 31.94, 77.40, ACC: 95.34\n", + "──────────────────────────────────────────────────────────────────────\n", + "Computing metrics on svhn dataset...\n", + "FPR@95: 52.26, AUROC: 90.76 AUPR_IN: 77.86, AUPR_OUT: 95.65\n", + "CCR: 0.08, 0.73, 17.60, 80.54, ACC: 95.34\n", + "──────────────────────────────────────────────────────────────────────\n", + "Computing metrics on texture dataset...\n", + "FPR@95: 59.75, AUROC: 88.73 AUPR_IN: 91.28, AUPR_OUT: 80.60\n", + "CCR: 0.09, 0.34, 9.69, 76.00, ACC: 95.34\n", + "──────────────────────────────────────────────────────────────────────\n", + "Computing metrics on place365 dataset...\n", + "FPR@95: 58.70, AUROC: 88.03 AUPR_IN: 65.24, AUPR_OUT: 96.04\n", + "CCR: 0.39, 2.11, 13.42, 71.66, ACC: 95.34\n", + "──────────────────────────────────────────────────────────────────────\n", + "Computing mean metrics...\n", + "FPR@95: 57.32, AUROC: 89.36 AUPR_IN: 75.33, AUPR_OUT: 92.60\n", + "CCR: 1.51, 3.58, 18.16, 76.40, ACC: 95.34\n", + "──────────────────────────────────────────────────────────────────────\n" + ] + } + ], + "source": [ + "eval_ood(postprocess_results)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2ce6263", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "9bfdd80c4cd5b3ca30f79c8858286326028dc154b9efddfc3ea147df9fc4c063" + }, + "kernelspec": { + "display_name": "Python 3.8.12 64-bit ('ood': conda)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/OpenOOD/error.txt b/OpenOOD/error.txt new file mode 100644 index 0000000000000000000000000000000000000000..961bd23a21eab7db18243c1c0a0d1d2f0d591c73 --- /dev/null +++ b/OpenOOD/error.txt @@ -0,0 +1,9 @@ + 0%| | 0/311 [00:001: + print(1) + + + sample = dict() + sample['data'] = image + sample['label'] = target["labels_level2"][0] + return sample + + # return image, target["labels_level1"][0], target["labels_level0"][0], attributes, target["labels_level2"][0], target["shape_label"][0] + + + def get_height_and_width(self, idx): + # read xml + xml_path = self.ware_xml[idx] + with open(xml_path,encoding='utf-8') as fid: + xml_str = fid.read() + xml = etree.fromstring(xml_str.encode('utf-8')) + data = self.parse_xml_to_dict(xml)["annotation"] + data_height = int(data["size"]["height"]) + data_width = int(data["size"]["width"]) + return data_height, data_width + + def load_xlsx_table(self, file_path): + age_table = pd.read_excel(file_path, engine='openpyxl')#璇诲叆age.xlsx + ware_id = np.asarray(age_table.iloc[:, 1],dtype=np.str_) + ware_name = np.asarray(age_table.iloc[:, 2]) + ware_age = np.asarray(age_table.iloc[:, 3]) + ware_book = np.asarray(age_table.iloc[:, 4]) + ware_shape = np.asarray(age_table.iloc[:, 5]) + now_location = np.asarray(age_table.iloc[:, 6]) + out_location = np.asarray(age_table.iloc[:, 7]) + category = np.asarray(age_table.iloc[:, 8]) + full_shape_name = 
np.asarray(age_table.iloc[:, 9],dtype=np.str_) + + return ware_id, ware_name, ware_age, ware_book, ware_shape, now_location, out_location, category, full_shape_name + + + + + def parse_xml_to_dict(self, xml): + """ + Args: + xml: xml tree obtained by parsing XML file contents using lxml.etree + Returns: + Python dictionary holding XML contents. + """ + + if len(xml) == 0: + return {xml.tag: xml.text} + + result = {} + for child in xml: + child_result = self.parse_xml_to_dict(child) + if child.tag != 'object': + result[child.tag] = child_result[child.tag] + else: + if child.tag not in result: + result[child.tag] = [] + result[child.tag].append(child_result[child.tag]) + return {xml.tag: result} + + + def collate_fn(self, batch): + '''Pad images and encode targets. + + As for images are of different sizes, we need to pad them to the same size. + + Args: + batch: (list) of images, cls_targets, loc_targets. + + Returns: + padded images, stacked cls_targets, stacked loc_targets. + ''' + imgs = [x[0] for x in batch] + level1_label = [x[1] for x in batch] + cat_label = [x[2] for x in batch] + att_label = [x[3] for x in batch] + level2_label = [x[4] for x in batch] + shape_label = [x[5] for x in batch] + h = w = self.input_size + num_imgs = len(imgs) + inputs = torch.zeros(num_imgs, 3, h, w) + + for i in range(num_imgs): + inputs[i] = imgs[i] + return inputs, torch.stack(level1_label), torch.stack(cat_label), torch.stack(att_label), torch.stack(level2_label), torch.stack(shape_label) + + + +class DingGuiXv_Dataset(data.Dataset): + def __init__(self, img_dir, input_transform=None, train=None, size=None): + self.root_dir = img_dir + # self.annotations_root=xml_dir + self.input_transform = input_transform + + # ware_img_name_for_3, _, ware_age_for3, _, ware_shape_for3, _, _, ding_gui_cat, _ = self.load_xlsx_table(excel_dir) + + # age_list = [] + # shape_list = [] + # ware_cat_list = [] + # for i in range(len(ding_gui_cat)): + # ware_cat = ding_gui_cat[i] + # if ware_category[ware_cat] == 0: + # era_dict = ding_age_idx + # elif ware_category[ware_cat] == 1: + # era_dict = gui_age_idx + # ware_age = era_dict[ware_age_for3[i]] + # ware_shape = shape_idx_together[ware_shape_for3[i]] + # age_list.append(ware_age) + # shape_list.append(ware_shape) + # ware_cat_list.append(ware_category[ware_cat]) + + + # self.ware_img_name = ware_img_name_for_3 + # self.ware_age = age_list + # self.ware_shape= shape_list + # # self.ding_gui_cat = ding_gui_cat + # self.ding_gui_cat = ware_cat_list + self.ware_img = [] + # self.ware_xml=[] + self.input_size = size + # self.train = train + + # self.front_img = [] + # self.back_img = [] + + self.ware_img_name = os.listdir(img_dir) + + for png_name in self.ware_img_name: + # png_name = img_name + '.png' + # xml_name = img_name + '.xml' + png_name = os.path.join(self.root_dir, png_name) + # xml_name = os.path.join(self.annotations_root, xml_name) + self.ware_img.append(png_name) + # self.ware_xml.append(xml_name) + + + + def __len__(self): + return len(self.ware_img_name) + + def __getitem__(self, idx): + if torch.is_tensor(idx): + idx = idx.tolist() + + # xml_path=self.ware_xml[idx] + # with open(xml_path,encoding='utf-8') as fid: + # xml_str = fid.read() + # xml = etree.fromstring(xml_str.encode('utf-8')) + # xml_data = self.parse_xml_to_dict(xml)["annotation"] + + # labels_level0 = [] + # labels_level1 = [] + # labels_level2 = [] + # # iscrowd = [] + # attributes = [0]*149 + # shape_label = [] + + # level_0 = self.ding_gui_cat[idx] + # level_1 = 
dating_tree_dict[str(self.ding_gui_cat[idx])+str(self.ware_age[idx])][1] + # level_2 = dating_tree_dict[str(self.ding_gui_cat[idx])+str(self.ware_age[idx])][2] + # if level_0==1 and level_2 in [8,9,10]: + # level_2 = 999 + + # labels_level0.append(level_0) + # labels_level1.append(level_1) + # labels_level2.append(level_2) + # shape_label.append(float(self.ware_shape[idx])) + # if 'object' in xml_data: + # for obj in xml_data["object"]: + # att_name = re.sub(r'[a-z0-9\t]|[^\w\s]', '', obj["name"]) + # if att_name in attribute_idx_together: + # attribute_id = attribute_idx_together[att_name] + # attributes[attribute_id] = 1 + image = Image.open(self.ware_img[idx]).convert('RGB') + + # labels_level0 = torch.from_numpy(np.asarray(labels_level0).astype('int64')) + # labels_level1 = torch.from_numpy(np.asarray(labels_level1).astype('int64')) + # labels_level2 = torch.from_numpy(np.asarray(labels_level2).astype('int64')) + # shape_label = torch.from_numpy(np.asarray(shape_label).astype('int64')) + # attributes = torch.from_numpy(np.asarray(attributes).astype('int64')) + # # iscrowd = torch.as_tensor(iscrowd, dtype=torch.int64) + # image_id = torch.tensor([idx]) + + # target = {} + # target["image_id"] = image_id + # target["labels_level0"] = labels_level0 + # target["labels_level1"] = labels_level1 + # target["labels_level2"] = labels_level2 + # target["attributes"] = attributes + # target["shape_label"] = shape_label + + if self.input_transform is not None: + image = self.input_transform(image) + # if int(labels_level0)>1: + # print(1) + + + + return image + # return image, target["labels_level1"][0], target["labels_level0"][0], attributes, target["labels_level2"][0], target["shape_label"][0] + + + def get_height_and_width(self, idx): + # read xml + xml_path = self.ware_xml[idx] + with open(xml_path,encoding='utf-8') as fid: + xml_str = fid.read() + xml = etree.fromstring(xml_str.encode('utf-8')) + data = self.parse_xml_to_dict(xml)["annotation"] + data_height = int(data["size"]["height"]) + data_width = int(data["size"]["width"]) + return data_height, data_width + + def load_xlsx_table(self, file_path): + age_table = pd.read_excel(file_path, engine='openpyxl')  # read in age.xlsx + ware_id = np.asarray(age_table.iloc[:, 1],dtype=np.str_) + ware_name = np.asarray(age_table.iloc[:, 2]) + ware_age = np.asarray(age_table.iloc[:, 3]) + ware_book = np.asarray(age_table.iloc[:, 4]) + ware_shape = np.asarray(age_table.iloc[:, 5]) + now_location = np.asarray(age_table.iloc[:, 6]) + out_location = np.asarray(age_table.iloc[:, 7]) + category = np.asarray(age_table.iloc[:, 8]) + full_shape_name = np.asarray(age_table.iloc[:, 9],dtype=np.str_) + + return ware_id, ware_name, ware_age, ware_book, ware_shape, now_location, out_location, category, full_shape_name + + + + + def parse_xml_to_dict(self, xml): + """ + Args: + xml: xml tree obtained by parsing XML file contents using lxml.etree + Returns: + Python dictionary holding XML contents. + """ + + if len(xml) == 0: + return {xml.tag: xml.text} + + result = {} + for child in xml: + child_result = self.parse_xml_to_dict(child) + if child.tag != 'object': + result[child.tag] = child_result[child.tag] + else: + if child.tag not in result: + result[child.tag] = [] + result[child.tag].append(child_result[child.tag]) + return {xml.tag: result} + + + def collate_fn(self, batch): + '''Pad images and encode targets. + + As the images are of different sizes, we need to pad them to the same size. + + Args: + batch: (list) of images, cls_targets, loc_targets.
+ + Returns: + padded images, stacked cls_targets, stacked loc_targets. + ''' + imgs = [x[0] for x in batch] + level1_label = [x[1] for x in batch] + cat_label = [x[2] for x in batch] + att_label = [x[3] for x in batch] + level2_label = [x[4] for x in batch] + shape_label = [x[5] for x in batch] + h = w = self.input_size + num_imgs = len(imgs) + inputs = torch.zeros(num_imgs, 3, h, w) + + for i in range(num_imgs): + inputs[i] = imgs[i] + return inputs, torch.stack(level1_label), torch.stack(cat_label), torch.stack(att_label), torch.stack(level2_label), torch.stack(shape_label) + + + + + + + +if __name__ == '__main__': + DATASET_ROOT = "/home/zrx/lab_disk1/zhourixin/zhouriixn/DingAndGui/Ding_and_Gui_Dataset" + data_path = DATASET_ROOT+"/image" + xml_path = DATASET_ROOT+"/xml" + train_excel_path = DATASET_ROOT+"/excel_origin_information/ding_and_gui_excel.xlsx" + + img_size = 450 + input_size = 400 + BATCH_SIZE = 10 + + transform = transforms.Compose([ + transforms.Resize((img_size, img_size)), + transforms.CenterCrop(input_size), + transforms.ToTensor(), + transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), + ]) + + trainset = BronzeWare_Dataset(data_path, xml_path, train_excel_path, transform, train=True, size=input_size) + + trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=16, drop_last = True, collate_fn=trainset.collate_fn) + + for batch_idx, (inputs, _,_,attribute_label,targets,shape_label) in enumerate(trainloader): + print(1) diff --git a/OpenOOD/openood/datasets/feature_dataset.py b/OpenOOD/openood/datasets/feature_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..aff62b3f0eaa5c2936a48745cb14dd9a53349fb7 --- /dev/null +++ b/OpenOOD/openood/datasets/feature_dataset.py @@ -0,0 +1,17 @@ +from torch.utils.data import Dataset + + +class FeatDataset(Dataset): + def __init__(self, feat, labels): + self.data = feat + self.labels = labels + self.len = feat.shape[0] + assert self.len == len(labels) + + def __len__(self): + return self.len + + def __getitem__(self, idx): + data = self.data[idx] + label = self.labels[idx] + return {'data': data, 'label': label} diff --git a/OpenOOD/openood/datasets/imglist_augmix_dataset.py b/OpenOOD/openood/datasets/imglist_augmix_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a4bccdc3311fe2bd83ab334ddcc14d3cb6ac3b50 --- /dev/null +++ b/OpenOOD/openood/datasets/imglist_augmix_dataset.py @@ -0,0 +1,109 @@ +import ast +import io +import logging +import os + +import torch +from PIL import Image, ImageFile + +from .base_dataset import BaseDataset + +# to fix "OSError: image file is truncated" +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +class Convert: + def __init__(self, mode='RGB'): + self.mode = mode + + def __call__(self, image): + return image.convert(self.mode) + + +class ImglistAugMixDataset(BaseDataset): + def __init__(self, + name, + imglist_pth, + data_dir, + num_classes, + preprocessor, + data_aux_preprocessor, + maxlen=None, + dummy_read=False, + dummy_size=None, + **kwargs): + super(ImglistAugMixDataset, self).__init__(**kwargs) + + self.name = name + with open(imglist_pth) as imgfile: + self.imglist = imgfile.readlines() + self.data_dir = data_dir + self.num_classes = num_classes + self.preprocessor = preprocessor + self.transform_image = preprocessor + self.transform_aux_image = data_aux_preprocessor + self.maxlen = maxlen + self.dummy_read = dummy_read + self.dummy_size = dummy_size + if dummy_read and dummy_size is None: + raise 
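A quick sanity check for the FeatDataset above (hypothetical shapes; the 512-dim features stand in for cached penultimate activations):

import torch
from torch.utils.data import DataLoader

feats = torch.randn(100, 512)
labels = torch.randint(0, 10, (100,))
loader = DataLoader(FeatDataset(feats, labels), batch_size=32, shuffle=True)
batch = next(iter(loader))
print(batch['data'].shape, batch['label'].shape)  # [32, 512] and [32]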
ValueError( + 'if dummy_read is True, should provide dummy_size') + + def __len__(self): + if self.maxlen is None: + return len(self.imglist) + else: + return min(len(self.imglist), self.maxlen) + + def getitem(self, index): + line = self.imglist[index].strip('\n') + tokens = line.split(' ', 1) + image_name, extra_str = tokens[0], tokens[1] + if self.data_dir != '' and image_name.startswith('/'): + raise RuntimeError('image_name starts with "/"') + path = os.path.join(self.data_dir, image_name) + sample = dict() + sample['image_name'] = image_name + kwargs = {'name': self.name, 'path': path, 'tokens': tokens} + try: + # some preprocessor methods require setup + self.preprocessor.setup(**kwargs) + except: + pass + + try: + if not self.dummy_read: + with open(path, 'rb') as f: + content = f.read() + filebytes = content + buff = io.BytesIO(filebytes) + if self.dummy_size is not None: + sample['data'] = torch.rand(self.dummy_size) + else: + image = Image.open(buff).convert('RGB') + orig, aug1, aug2 = self.transform_image(image) + sample['data'] = orig + sample['data_aug1'] = aug1 + sample['data_aug2'] = aug2 + sample['data_aux'] = self.transform_aux_image(image) + extras = ast.literal_eval(extra_str) + try: + for key, value in extras.items(): + sample[key] = value + # if you use dic the code below will need ['label'] + sample['label'] = 0 + except AttributeError: + sample['label'] = int(extra_str) + # Generate Soft Label + soft_label = torch.Tensor(self.num_classes) + if sample['label'] < 0: + soft_label.fill_(1.0 / self.num_classes) + else: + soft_label.fill_(0) + soft_label[sample['label']] = 1 + sample['soft_label'] = soft_label + + except Exception as e: + logging.error('[{}] broken'.format(path)) + raise e + return sample diff --git a/OpenOOD/openood/datasets/imglist_dataset.py b/OpenOOD/openood/datasets/imglist_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..5f772290be9ad49687f45c2fe005e54ec551d36f --- /dev/null +++ b/OpenOOD/openood/datasets/imglist_dataset.py @@ -0,0 +1,106 @@ +import ast +import io +import logging +import os + +import torch +from PIL import Image, ImageFile + +from .base_dataset import BaseDataset + +# to fix "OSError: image file is truncated" +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +class Convert: + def __init__(self, mode='RGB'): + self.mode = mode + + def __call__(self, image): + return image.convert(self.mode) + + +class ImglistDataset(BaseDataset): + def __init__(self, + name, + imglist_pth, + data_dir, + num_classes, + preprocessor, + data_aux_preprocessor, + maxlen=None, + dummy_read=False, + dummy_size=None, + **kwargs): + super(ImglistDataset, self).__init__(**kwargs) + + self.name = name + with open(imglist_pth) as imgfile: + self.imglist = imgfile.readlines() + self.data_dir = data_dir + self.num_classes = num_classes + self.preprocessor = preprocessor + self.transform_image = preprocessor + self.transform_aux_image = data_aux_preprocessor + self.maxlen = maxlen + self.dummy_read = dummy_read + self.dummy_size = dummy_size + if dummy_read and dummy_size is None: + raise ValueError( + 'if dummy_read is True, should provide dummy_size') + + def __len__(self): + if self.maxlen is None: + return len(self.imglist) + else: + return min(len(self.imglist), self.maxlen) + + def getitem(self, index): + line = self.imglist[index].strip('\n') + tokens = line.split(' ', 1) + image_name, extra_str = tokens[0], tokens[1] + if self.data_dir != '' and image_name.startswith('/'): + raise RuntimeError('image_name starts with "/"') + path = 
os.path.join(self.data_dir, image_name) + sample = dict() + sample['image_name'] = image_name + kwargs = {'name': self.name, 'path': path, 'tokens': tokens} + try: + # some preprocessor methods require setup + self.preprocessor.setup(**kwargs) + except: + pass + + try: + if not self.dummy_read: + with open(path, 'rb') as f: + content = f.read() + filebytes = content + buff = io.BytesIO(filebytes) + if self.dummy_size is not None: + sample['data'] = torch.rand(self.dummy_size) + else: + image = Image.open(buff).convert('RGB') + sample['data'] = self.transform_image(image) + sample['data_aux'] = self.transform_aux_image(image) + extras = ast.literal_eval(extra_str) + try: + for key, value in extras.items(): + sample[key] = value + # if you use dic the code below will need ['label'] + sample['label'] = 0 + except AttributeError: + sample['label'] = int(extra_str) + # Generate Soft Label + soft_label = torch.Tensor(self.num_classes) + if sample['label'] < 0: + soft_label.fill_(1.0 / self.num_classes) + else: + soft_label.fill_(0) + soft_label[sample['label']] = 1 + sample['soft_label'] = soft_label + + except Exception as e: + logging.error('[{}] broken'.format(path)) + raise e + return sample diff --git a/OpenOOD/openood/datasets/imglist_extradata_dataset.py b/OpenOOD/openood/datasets/imglist_extradata_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..b5c48075d4bd356ee74c7dafa81e0fa939e336cb --- /dev/null +++ b/OpenOOD/openood/datasets/imglist_extradata_dataset.py @@ -0,0 +1,190 @@ +import ast +import io +import logging +import os + +import numpy as np +import torch +from PIL import Image, ImageFile +from torch.utils.data import Sampler + +from .base_dataset import BaseDataset + +# to fix "OSError: image file is truncated" +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +class Convert: + def __init__(self, mode='RGB'): + self.mode = mode + + def __call__(self, image): + return image.convert(self.mode) + + +class ImglistExtraDataDataset(BaseDataset): + def __init__(self, + name, + imglist_pth, + data_dir, + num_classes, + preprocessor, + data_aux_preprocessor, + maxlen=None, + dummy_read=False, + dummy_size=None, + extra_data_pth=None, + extra_label_pth=None, + extra_percent=100, + **kwargs): + super(ImglistExtraDataDataset, self).__init__(**kwargs) + + self.name = name + with open(imglist_pth) as imgfile: + self.imglist = imgfile.readlines() + self.data_dir = data_dir + self.num_classes = num_classes + self.preprocessor = preprocessor + self.transform_image = preprocessor + self.transform_aux_image = data_aux_preprocessor + self.maxlen = maxlen + self.dummy_read = dummy_read + self.dummy_size = dummy_size + if dummy_read and dummy_size is None: + raise ValueError( + 'if dummy_read is True, should provide dummy_size') + + self.orig_ids = list(range(len(self.imglist))) + + assert extra_data_pth is not None + assert extra_label_pth is not None + extra_data = np.load(extra_data_pth) + extra_labels = np.load(extra_label_pth) + assert len(extra_data) == len(extra_labels) + + self.extra_num = int(len(extra_labels) * extra_percent / 100.) 
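The soft-label block shared by the imglist datasets above reduces to this helper: negative labels (unlabeled or OOD entries) get a uniform distribution, everything else a one-hot vector (a sketch for illustration only):

import torch

def make_soft_label(label: int, num_classes: int) -> torch.Tensor:
    soft = torch.zeros(num_classes)
    if label < 0:
        soft.fill_(1.0 / num_classes)  # e.g. (-1, 4) -> [0.25, 0.25, 0.25, 0.25]
    else:
        soft[label] = 1.0              # e.g. (2, 4)  -> [0., 0., 1., 0.]
    return soft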
+ self.total_num = len(self.imglist) + self.extra_num + + rng = np.random.RandomState(0) + indices = rng.permutation(len(extra_labels)) + self.extra_data = extra_data[indices[:self.extra_num]] + self.extra_labels = extra_labels[indices[:self.extra_num]] + self.extra_ids = list( + set(range(self.total_num)) - set(range(len(self.imglist)))) + + def __len__(self): + return self.total_num + + def getitem(self, index): + if index in self.orig_ids: + line = self.imglist[index].strip('\n') + tokens = line.split(' ', 1) + image_name, extra_str = tokens[0], tokens[1] + if self.data_dir != '' and image_name.startswith('/'): + raise RuntimeError('image_name starts with "/"') + path = os.path.join(self.data_dir, image_name) + sample = dict() + sample['image_name'] = image_name + kwargs = {'name': self.name, 'path': path, 'tokens': tokens} + # some preprocessor methods require setup + self.preprocessor.setup(**kwargs) + try: + if not self.dummy_read: + with open(path, 'rb') as f: + content = f.read() + filebytes = content + buff = io.BytesIO(filebytes) + if self.dummy_size is not None: + sample['data'] = torch.rand(self.dummy_size) + else: + image = Image.open(buff).convert('RGB') + sample['data'] = self.transform_image(image) + sample['data_aux'] = self.transform_aux_image(image) + extras = ast.literal_eval(extra_str) + try: + for key, value in extras.items(): + sample[key] = value + # if you use dic the code below will need ['label'] + sample['label'] = 0 + except AttributeError: + sample['label'] = int(extra_str) + # Generate Soft Label + soft_label = torch.Tensor(self.num_classes) + if sample['label'] < 0: + soft_label.fill_(1.0 / self.num_classes) + else: + soft_label.fill_(0) + soft_label[sample['label']] = 1 + sample['soft_label'] = soft_label + + except Exception as e: + logging.error('[{}] broken'.format(path)) + raise e + return sample + else: + ind = index - len(self.imglist) + image = Image.fromarray(self.extra_data[ind]) + + sample = dict() + sample['image_name'] = str(ind) # dummy name + sample['data'] = self.transform_image(image) + sample['data_aux'] = self.transform_aux_image(image) + sample['label'] = self.extra_labels[ind] + + # Generate Soft Label + soft_label = torch.Tensor(self.num_classes) + if sample['label'] < 0: + soft_label.fill_(1.0 / self.num_classes) + else: + soft_label.fill_(0) + soft_label[sample['label']] = 1 + sample['soft_label'] = soft_label + + return sample + + +class TwoSourceSampler(Sampler): + def __init__(self, real_inds, syn_inds, batch_size, real_ratio=0.5): + assert len(real_inds) == 50000 + self.real_inds = real_inds + self.syn_inds = syn_inds + self.batch_size = batch_size + self.real_batch_size = int(self.batch_size * real_ratio) + self.syn_batch_size = self.batch_size - self.real_batch_size + if real_ratio == 0: + assert self.real_batch_size == 0 + elif real_ratio == 1: + assert self.syn_batch_size == 0 + + self.num_batches = int(np.ceil(len(self.real_inds) / self.batch_size)) + super().__init__(None) + + def __iter__(self): + batch_counter = 0 + real_inds_shuffled = [ + self.real_inds[i] for i in torch.randperm(len(self.real_inds)) + ] + syn_inds_shuffled = [ + self.syn_inds[i] for i in torch.randperm(len(self.syn_inds)) + ] + + real_offset = 0 + syn_offset = 0 + while batch_counter < self.num_batches: + real_batch = real_inds_shuffled[ + real_offset:min(real_offset + + self.real_batch_size, len(real_inds_shuffled))] + real_offset += self.real_batch_size + + syn_batch = syn_inds_shuffled[ + syn_offset:min(syn_offset + + self.syn_batch_size, 
len(syn_inds_shuffled))] + syn_offset += self.syn_batch_size + + batch = real_batch + syn_batch + np.random.shuffle(batch) + yield batch + batch_counter += 1 + + def __len__(self): + return self.num_batches diff --git a/OpenOOD/openood/datasets/udg_dataset.py b/OpenOOD/openood/datasets/udg_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..6beb4636dbefab5ce00edaac19401766e9d9f56f --- /dev/null +++ b/OpenOOD/openood/datasets/udg_dataset.py @@ -0,0 +1,95 @@ +import ast +import io +import logging +import os + +import numpy as np +import torch +from PIL import Image, ImageFile + +from .imglist_dataset import ImglistDataset + +# to fix "OSError: image file is truncated" +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +class UDGDataset(ImglistDataset): + def __init__(self, + name, + imglist_pth, + data_dir, + num_classes, + preprocessor, + data_aux_preprocessor, + maxlen=None, + dummy_read=False, + dummy_size=None, + **kwargs): + super(UDGDataset, + self).__init__(name, imglist_pth, data_dir, num_classes, + preprocessor, data_aux_preprocessor, maxlen, + dummy_read, dummy_size, **kwargs) + + self.cluster_id = np.zeros(len(self.imglist), dtype=int) + self.cluster_reweight = np.ones(len(self.imglist), dtype=float) + + # use pseudo labels for unlabeled dataset during training + self.pseudo_label = np.array(-1 * np.ones(len(self.imglist)), + dtype=int) + self.ood_conf = np.ones(len(self.imglist), dtype=float) + + def getitem(self, index): + line = self.imglist[index].strip('\n') + tokens = line.split(' ', 1) + image_name, extra_str = tokens[0], tokens[1] + if self.data_dir != '' and image_name.startswith('/'): + raise RuntimeError('root not empty but image_name starts with "/"') + path = os.path.join(self.data_dir, image_name) + sample = dict() + sample['image_name'] = image_name + try: + if not self.dummy_read: + with open(path, 'rb') as f: + content = f.read() + filebytes = content + buff = io.BytesIO(filebytes) + if self.dummy_size is not None: + sample['data'] = torch.rand(self.dummy_size) + else: + image = Image.open(buff).convert('RGB') + sample['data'] = self.transform_image(image) + sample['data_aux'] = self.transform_aux_image(image) + extras = ast.literal_eval(extra_str) + try: + for key, value in extras.items(): + sample[key] = value + except AttributeError: + sample['label'] = int(extra_str) + # Generate Soft Label + soft_label = torch.Tensor(self.num_classes) + if sample['label'] < 0: + soft_label.fill_(1.0 / self.num_classes) + else: + soft_label.fill_(0) + soft_label[sample['label']] = 1 + sample['soft_label'] = soft_label + # Deep Clustering Aux Label Assignment for + # both labeled/unlabeled data + sample['cluster_id'] = self.cluster_id[index] + sample['cluster_reweight'] = self.cluster_reweight[index] + + # Deep Clustering Pseudo Label Assignment for unlabeled data + sample['pseudo_label'] = self.pseudo_label[index] + soft_pseudo_label = torch.Tensor(len(sample['soft_label'])) + if sample['pseudo_label'] == -1: + soft_pseudo_label.fill_(1.0 / len(sample['soft_label'])) + else: + soft_pseudo_label.fill_(0.0) + soft_pseudo_label[sample['pseudo_label']] = 1.0 + sample['pseudo_softlabel'] = soft_pseudo_label + sample['ood_conf'] = self.ood_conf[index] + + except Exception as e: + logging.error('[{}] broken'.format(path)) + raise e + return sample diff --git a/OpenOOD/openood/datasets/utils.py b/OpenOOD/openood/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d09452a0649859f13a2b76235fe4d13c8663dbb5 --- /dev/null +++ 
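How ImglistExtraDataDataset and the TwoSourceSampler defined above fit together (hypothetical sizes: the sampler asserts exactly 50000 real indices, and the synthetic pool here is large enough to fill every batch):

from torch.utils.data import DataLoader

# `dataset` is assumed to be an ImglistExtraDataDataset mixing 50k real
# and 30k synthetic samples, with real indices first
sampler = TwoSourceSampler(real_inds=list(range(50000)),
                           syn_inds=list(range(50000, 80000)),
                           batch_size=128,
                           real_ratio=0.5)
loader = DataLoader(dataset, batch_sampler=sampler, num_workers=4)
# each of the 391 batches mixes 64 real and 64 synthetic indices, shuffled together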
b/OpenOOD/openood/datasets/utils.py @@ -0,0 +1,218 @@ +import os +import torch +from numpy import load +from torch.utils.data import DataLoader + +from openood.preprocessors.test_preprocessor import TestStandardPreProcessor +from openood.preprocessors.utils import get_preprocessor +from openood.utils.config import Config + +from .feature_dataset import FeatDataset +from .imglist_dataset import ImglistDataset +from .imglist_augmix_dataset import ImglistAugMixDataset +from .imglist_extradata_dataset import ImglistExtraDataDataset, TwoSourceSampler +from .udg_dataset import UDGDataset +from openood.datasets.bronze_dataset import BronzeWare_ID_Dataset + +def get_dataloader(config: Config): + # prepare a dataloader dictionary + dataset_config = config.dataset + dataloader_dict = {} + for split in dataset_config.split_names: + split_config = dataset_config[split] + preprocessor = get_preprocessor(config, split) + # weak augmentation for data_aux + data_aux_preprocessor = TestStandardPreProcessor(config) + + root_dir = config.output_dir.split('/results')[0] + + if split_config.dataset_class == 'ImglistExtraDataDataset': + dataset = ImglistExtraDataDataset( + name=dataset_config.name + '_' + split, + imglist_pth=split_config.imglist_pth, + data_dir=split_config.data_dir, + num_classes=dataset_config.num_classes, + preprocessor=preprocessor, + data_aux_preprocessor=data_aux_preprocessor, + extra_data_pth=split_config.extra_data_pth, + extra_label_pth=split_config.extra_label_pth, + extra_percent=split_config.extra_percent) + + batch_sampler = TwoSourceSampler(dataset.orig_ids, + dataset.extra_ids, + split_config.batch_size, + split_config.orig_ratio) + + dataloader = DataLoader( + dataset, + batch_sampler=batch_sampler, + num_workers=dataset_config.num_workers, + ) + elif split_config.dataset_class == 'ImglistAugMixDataset': + dataset = ImglistAugMixDataset( + name=dataset_config.name + '_' + split, + imglist_pth=split_config.imglist_pth, + data_dir=split_config.data_dir, + num_classes=dataset_config.num_classes, + preprocessor=preprocessor, + data_aux_preprocessor=data_aux_preprocessor) + sampler = None + if dataset_config.num_gpus * dataset_config.num_machines > 1: + sampler = torch.utils.data.distributed.DistributedSampler( + dataset) + split_config.shuffle = False + + dataloader = DataLoader(dataset, + batch_size=split_config.batch_size, + shuffle=split_config.shuffle, + num_workers=dataset_config.num_workers, + sampler=sampler) + elif split_config.dataset_class == 'Bronze2ExcelDataset': + dataset = BronzeWare_ID_Dataset(img_dir=root_dir+split_config.data_dir, + xml_dir=root_dir+split_config.xml_path, + excel_dir=root_dir+split_config.imglist_pth, + input_transform=preprocessor) + dataloader = DataLoader(dataset, + batch_size=split_config.batch_size, + shuffle=split_config.shuffle, + num_workers=dataset_config.num_workers) + else: + CustomDataset = eval(split_config.dataset_class) + split_config.imglist_pth = merge_path(root_dir, split_config.imglist_pth) + split_config.data_dir = merge_path(root_dir, split_config.data_dir) + + dataset = CustomDataset( + name=dataset_config.name + '_' + split, + imglist_pth=split_config.imglist_pth, + data_dir=split_config.data_dir, + num_classes=dataset_config.num_classes, + preprocessor=preprocessor, + data_aux_preprocessor=data_aux_preprocessor) + sampler = None + if dataset_config.num_gpus * dataset_config.num_machines > 1: + sampler = torch.utils.data.distributed.DistributedSampler( + dataset) + split_config.shuffle = False + + dataloader = 
DataLoader(dataset, + batch_size=split_config.batch_size, + shuffle=split_config.shuffle, + num_workers=dataset_config.num_workers, + sampler=sampler) + + dataloader_dict[split] = dataloader + return dataloader_dict + +def merge_path(roo_dir, add_dir): + add_dir = add_dir.split('./')[-1] + + out_put_dir = os.path.join(roo_dir, add_dir) + + return out_put_dir + +def get_ood_dataloader(config: Config): + # specify custom dataset class + ood_config = config.ood_dataset + CustomDataset = eval(ood_config.dataset_class) + dataloader_dict = {} + for split in ood_config.split_names: + split_config = ood_config[split] + preprocessor = get_preprocessor(config, split) + data_aux_preprocessor = TestStandardPreProcessor(config) + root_dir = config.output_dir.split('/results')[0] + + if split == 'val': + + split_config.imglist_pth = merge_path(root_dir, split_config.imglist_pth) + split_config.data_dir = merge_path(root_dir, split_config.data_dir) + # validation set + dataset = CustomDataset( + name=ood_config.name + '_' + split, + imglist_pth=split_config.imglist_pth, + data_dir=split_config.data_dir, + num_classes=ood_config.num_classes, + preprocessor=preprocessor, + data_aux_preprocessor=data_aux_preprocessor) + dataloader = DataLoader(dataset, + batch_size=ood_config.batch_size, + shuffle=ood_config.shuffle, + num_workers=ood_config.num_workers) + dataloader_dict[split] = dataloader + else: + # dataloaders for csid, nearood, farood + sub_dataloader_dict = {} + for dataset_name in split_config.datasets: + dataset_config = split_config[dataset_name] + + dataset_config.imglist_pth = merge_path(root_dir, dataset_config.imglist_pth) + dataset_config.data_dir = merge_path(root_dir, dataset_config.data_dir) + + dataset = CustomDataset( + name=ood_config.name + '_' + split, + imglist_pth=dataset_config.imglist_pth, + data_dir=dataset_config.data_dir, + num_classes=ood_config.num_classes, + preprocessor=preprocessor, + data_aux_preprocessor=data_aux_preprocessor) + dataloader = DataLoader(dataset, + batch_size=ood_config.batch_size, + shuffle=ood_config.shuffle, + num_workers=ood_config.num_workers) + sub_dataloader_dict[dataset_name] = dataloader + dataloader_dict[split] = sub_dataloader_dict + + return dataloader_dict + + +def get_feature_dataloader(dataset_config: Config): + # load in the cached feature + loaded_data = load(dataset_config.feat_path, allow_pickle=True) + total_feat = torch.from_numpy(loaded_data['feat_list']) + del loaded_data + # reshape the vector to fit in to the network + total_feat.unsqueeze_(-1).unsqueeze_(-1) + # let's see what we got here should be something like: + # torch.Size([total_num, channel_size, 1, 1]) + print('Loaded feature size: {}'.format(total_feat.shape)) + + split_config = dataset_config['train'] + + dataset = FeatDataset(feat=total_feat) + dataloader = DataLoader(dataset, + batch_size=split_config.batch_size, + shuffle=split_config.shuffle, + num_workers=dataset_config.num_workers) + + return dataloader + + +def get_feature_opengan_dataloader(dataset_config: Config): + feat_root = dataset_config.feat_root + + dataloader_dict = {} + for d in ['id_train', 'id_val', 'ood_val']: + # load in the cached feature + loaded_data = load(os.path.join(feat_root, f'{d}.npz'), + allow_pickle=True) + total_feat = torch.from_numpy(loaded_data['feat_list']) + total_labels = loaded_data['label_list'] + del loaded_data + # reshape the vector to fit in to the network + total_feat.unsqueeze_(-1).unsqueeze_(-1) + # let's see what we got here should be something like: + # 
torch.Size([total_num, channel_size, 1, 1])
+        print('Loaded feature size: {}'.format(total_feat.shape))
+
+        if d == 'id_train':
+            split_config = dataset_config['train']
+        else:
+            split_config = dataset_config['val']
+
+        dataset = FeatDataset(feat=total_feat, labels=total_labels)
+        dataloader = DataLoader(dataset,
+                                batch_size=split_config.batch_size,
+                                shuffle=split_config.shuffle,
+                                num_workers=dataset_config.num_workers)
+        dataloader_dict[d] = dataloader
+
+    return dataloader_dict
diff --git a/OpenOOD/openood/evaluation_api/__init__.py b/OpenOOD/openood/evaluation_api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6eedafcefee18eb8dbcede389f60597447a91c2f
--- /dev/null
+++ b/OpenOOD/openood/evaluation_api/__init__.py
@@ -0,0 +1 @@
+from .evaluator import Evaluator
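For reference, the feature loaders above expect one .npz file per split containing `feat_list` and `label_list` arrays; a minimal sketch of writing a compatible cache follows (array shapes, class count, and the feat_root path are illustrative, not part of the source):

import os
import numpy as np

# illustrative shapes: 1000 cached penultimate-layer features of width 512
feat = np.random.randn(1000, 512).astype(np.float32)
labels = np.random.randint(0, 10, size=1000)

os.makedirs('feat_root', exist_ok=True)
# one file per split, named id_train.npz / id_val.npz / ood_val.npz as iterated above
np.savez(os.path.join('feat_root', 'id_train.npz'), feat_list=feat, label_list=labels)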
diff --git a/OpenOOD/openood/evaluation_api/datasets.py b/OpenOOD/openood/evaluation_api/datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a6f0f74a4da4c263358d4f125d4199cee67b3a9
--- /dev/null
+++ b/OpenOOD/openood/evaluation_api/datasets.py
@@ -0,0 +1,622 @@
+import os
+import gdown
+import zipfile
+
+from torch.utils.data import DataLoader
+import torchvision as tvs
+# compare version components numerically; a plain string comparison
+# mis-orders versions such as '0.9' vs '0.13'
+if tuple(map(int, tvs.__version__.split('.')[:2])) >= (0, 13):
+    tvs_new = True
+else:
+    tvs_new = False
+
+from openood.datasets.imglist_dataset import ImglistDataset
+from openood.datasets.bronze_dataset import BronzeWare_ID_Dataset
+from openood.preprocessors import BasePreprocessor
+
+from .preprocessor import get_default_preprocessor, ImageNetCPreProcessor
+
+DATA_INFO = {
+    'cifar10': {
+        'num_classes': 10,
+        'id': {
+            'train': {
+                'data_dir': 'images_classic/',
+                'imglist_path': 'benchmark_imglist/cifar10/train_cifar10.txt'
+            },
+            'val': {
+                'data_dir': 'images_classic/',
+                'imglist_path': 'benchmark_imglist/cifar10/val_cifar10.txt'
+            },
+            'test': {
+                'data_dir': 'images_classic/',
+                'imglist_path': 'benchmark_imglist/cifar10/test_cifar10.txt'
+            }
+        },
+        'csid': {
+            'datasets': ['cifar10c'],
+            'cinic10': {
+                'data_dir': 'images_classic/',
+                'imglist_path': 'benchmark_imglist/cifar10/val_cinic10.txt'
+            },
+            'cifar10c': {
+                'data_dir': 'images_classic/',
+                'imglist_path': 'benchmark_imglist/cifar10/test_cifar10c.txt'
+            }
+        },
+        'ood': {
+            'val': {
+                'data_dir': 'images_classic/',
+                'imglist_path': 'benchmark_imglist/cifar10/val_tin.txt'
+            },
+            'near': {
+                'datasets': ['cifar100', 'tin'],
+                'cifar100': {
+                    'data_dir': 'images_classic/',
+                    'imglist_path': 'benchmark_imglist/cifar10/test_cifar100.txt'
+                },
+                'tin': {
+                    'data_dir': 'images_classic/',
+                    'imglist_path': 'benchmark_imglist/cifar10/test_tin.txt'
+                }
+            },
+            'far': {
+                'datasets': ['mnist', 'svhn', 'texture', 'places365'],
+                'mnist': {
+                    'data_dir': 'images_classic/',
+                    'imglist_path': 'benchmark_imglist/cifar10/test_mnist.txt'
+                },
+                'svhn': {
+                    'data_dir': 'images_classic/',
+                    'imglist_path': 'benchmark_imglist/cifar10/test_svhn.txt'
+                },
+                'texture': {
+                    'data_dir': 'images_classic/',
+                    'imglist_path': 'benchmark_imglist/cifar10/test_texture.txt'
+                },
+                'places365': {
+                    'data_dir': 'images_classic/',
+                    'imglist_path': 'benchmark_imglist/cifar10/test_places365.txt'
+                },
+            }
+        }
+    },
+    'cifar100': {
+        'num_classes': 100,
+        'id': {
+            'train': {
+                'data_dir': 'images_classic/',
+                'imglist_path': 'benchmark_imglist/cifar100/train_cifar100.txt'
+            },
+            'val': {
+                'data_dir': 'images_classic/',
+                'imglist_path':
'benchmark_imglist/cifar100/val_cifar100.txt' + }, + 'test': { + 'data_dir': 'images_classic/', + 'imglist_path': 'benchmark_imglist/cifar100/test_cifar100.txt' + } + }, + 'csid': { + 'datasets': [], + }, + 'ood': { + 'val': { + 'data_dir': 'images_classic/', + 'imglist_path': 'benchmark_imglist/cifar100/val_tin.txt' + }, + 'near': { + 'datasets': ['cifar10', 'tin'], + 'cifar10': { + 'data_dir': 'images_classic/', + 'imglist_path': + 'benchmark_imglist/cifar100/test_cifar10.txt' + }, + 'tin': { + 'data_dir': 'images_classic/', + 'imglist_path': 'benchmark_imglist/cifar100/test_tin.txt' + } + }, + 'far': { + 'datasets': ['mnist', 'svhn', 'texture', 'places365'], + 'mnist': { + 'data_dir': 'images_classic/', + 'imglist_path': 'benchmark_imglist/cifar100/test_mnist.txt' + }, + 'svhn': { + 'data_dir': 'images_classic/', + 'imglist_path': 'benchmark_imglist/cifar100/test_svhn.txt' + }, + 'texture': { + 'data_dir': 'images_classic/', + 'imglist_path': + 'benchmark_imglist/cifar100/test_texture.txt' + }, + 'places365': { + 'data_dir': 'images_classic/', + 'imglist_path': + 'benchmark_imglist/cifar100/test_places365.txt' + } + }, + } + }, + 'imagenet200': { + 'num_classes': 200, + 'id': { + 'train': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/train_imagenet200.txt' + }, + 'val': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/val_imagenet200.txt' + }, + 'test': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_imagenet200.txt' + } + }, + 'csid': { + 'datasets': ['imagenet_v2', 'imagenet_c', 'imagenet_r'], + 'imagenet_v2': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_imagenet200_v2.txt' + }, + 'imagenet_c': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_imagenet200_c.txt' + }, + 'imagenet_r': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_imagenet200_r.txt' + }, + }, + 'ood': { + 'val': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/val_openimage_o.txt' + }, + 'near': { + 'datasets': ['ssb_hard', 'ninco'], + 'ssb_hard': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_ssb_hard.txt' + }, + 'ninco': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_ninco.txt' + } + }, + 'far': { + 'datasets': ['inaturalist', 'textures', 'openimage_o'], + 'inaturalist': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_inaturalist.txt' + }, + 'textures': { + 'data_dir': + 'images_classic/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_textures.txt' + }, + 'openimage_o': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet200/test_openimage_o.txt' + }, + }, + } + }, + 'imagenet': { + 'num_classes': 1000, + 'id': { + 'train': { + 'data_dir': 'images_largescale/', + 'imglist_path': 'benchmark_imglist/imagenet/train_imagenet.txt' + }, + 'val': { + 'data_dir': 'images_largescale/', + 'imglist_path': 'benchmark_imglist/imagenet/val_imagenet.txt' + }, + 'test': { + 'data_dir': 'images_largescale/', + 'imglist_path': 'benchmark_imglist/imagenet/test_imagenet.txt' + } + }, + 'csid': { + 'datasets': [], + }, + # 'csid': { + # 'datasets': ['imagenet_v2', 'imagenet_c', 'imagenet_r'], + # 'imagenet_v2': { + # 'data_dir': 
'images_largescale/', + # 'imglist_path': + # 'benchmark_imglist/imagenet/test_imagenet_v2.txt' + # }, + # 'imagenet_c': { + # 'data_dir': 'images_largescale/', + # 'imglist_path': + # 'benchmark_imglist/imagenet/test_imagenet_c.txt' + # }, + # 'imagenet_r': { + # 'data_dir': 'images_largescale/', + # 'imglist_path': + # 'benchmark_imglist/imagenet/test_imagenet_r.txt' + # }, + # }, + 'ood': { + 'val': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet/val_openimage_o.txt' + }, + 'near': { + 'datasets': ['imagenet22k_container', 'ssb_hard', 'ninco'], + 'imagenet22k_container': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet21k_container/imagenet21k_container_file-list.txt' + }, + 'ssb_hard': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet/test_ssb_hard.txt' + }, + 'ninco': { + 'data_dir': 'images_largescale/', + 'imglist_path': 'benchmark_imglist/imagenet/test_ninco.txt' + } + }, + 'far': { + 'datasets': ['inaturalist', 'textures', 'openimage_o'], + 'inaturalist': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet/test_inaturalist.txt' + }, + 'textures': { + 'data_dir': 'images_classic/', + 'imglist_path': + 'benchmark_imglist/imagenet/test_textures.txt' + }, + 'openimage_o': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet/test_openimage_o.txt' + }, + }, + } + }, + 'bronze2': { + 'num_classes': 11, + 'id': { + 'train': { + 'data_dir': 'bronze_ID_and_OOD/bronze2/images', + 'imglist_path': 'benchmark_imglist/bronzeData/ding_and_gui_train.xlsx', + 'xml_path': 'bronze_ID_and_OOD/bronze2/xmls' + }, + 'val': { + 'data_dir': 'bronze_ID_and_OOD/bronze2/images', + 'imglist_path': 'benchmark_imglist/bronzeData/ding_and_gui_val.xlsx', + 'xml_path': 'bronze_ID_and_OOD/bronze2/xmls' + }, + 'test': { + 'data_dir': 'bronze_ID_and_OOD/bronze2/images', + 'imglist_path': 'benchmark_imglist/bronzeData/ding_and_gui_test.xlsx', + 'xml_path': 'bronze_ID_and_OOD/bronze2/xmls' + } + }, + 'csid': { + 'datasets': [], + }, + 'ood': { + 'val': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet/val_openimage_o.txt' + }, + 'near': { + 'datasets': ['imagenet22k_container', 'imagenet22k_container_refine', 'bronzeS_containerM', 'bronzeM_containerS', 'bronze_Line'], + 'imagenet22k_container': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet21k_container/imagenet21k_container_file-list.txt' + }, + 'imagenet22k_container_refine': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet21k_container_refine/imagenet21k_container_file-list-refine.txt' + }, + 'bronzeS_containerM': { + 'data_dir': 'images_largescale/', + 'imglist_path': 'images_largescale/transfer_dataset/bronze_structure_container_material/bronze_structure_container_material_test.txt' + }, + 'bronzeM_containerS': { + 'data_dir': 'images_largescale/', + 'imglist_path': 'images_largescale/transfer_dataset/container_structure_bronze_material/container_structure_bronze_material_test.txt' + }, + 'bronze_Line': { + 'data_dir': 'images_largescale/', + 'imglist_path': 'images_largescale/bronze_line/bronze2_Line_OOD_list.txt' + } + }, + 'mid': { + 'datasets': ['ssb_hard', 'ninco'], + 'ssb_hard': { + 'data_dir': 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet/test_ssb_hard.txt' + }, + 'ninco': { + 'data_dir': 'images_largescale/', + 'imglist_path': 
'benchmark_imglist/imagenet/test_ninco.txt' + } + }, + 'far': { + 'datasets': ['inaturalist', 'textures', 'openimage_o'], + 'inaturalist': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet/test_inaturalist.txt' + }, + 'textures': { + 'data_dir': 'images_classic/', + 'imglist_path': + 'benchmark_imglist/imagenet/test_textures.txt' + }, + 'openimage_o': { + 'data_dir': + 'images_largescale/', + 'imglist_path': + 'benchmark_imglist/imagenet/test_openimage_o.txt' + }, + }, + } + }, +} + +download_id_dict = { + 'cifar10': '1Co32RiiWe16lTaiOU6JMMnyUYS41IlO1', + 'cifar100': '1PGKheHUsf29leJPPGuXqzLBMwl8qMF8_', + 'tin': '1PZ-ixyx52U989IKsMA2OT-24fToTrelC', + 'mnist': '1CCHAGWqA1KJTFFswuF9cbhmB-j98Y1Sb', + 'svhn': '1DQfc11HOtB1nEwqS4pWUFp8vtQ3DczvI', + 'texture': '1OSz1m3hHfVWbRdmMwKbUzoU8Hg9UKcam', + 'places365': '1Ec-LRSTf6u5vEctKX9vRp9OA6tqnJ0Ay', + 'imagenet_1k': '1i1ipLDFARR-JZ9argXd2-0a6DXwVhXEj', + 'species_sub': '1-JCxDx__iFMExkYRMylnGJYTPvyuX6aq', + 'ssb_hard': '1PzkA-WGG8Z18h0ooL_pDdz9cO-DCIouE', + 'ninco': '1Z82cmvIB0eghTehxOGP5VTdLt7OD3nk6', + 'inaturalist': '1zfLfMvoUD0CUlKNnkk7LgxZZBnTBipdj', + 'places': '1fZ8TbPC4JGqUCm-VtvrmkYxqRNp2PoB3', + 'sun': '1ISK0STxWzWmg-_uUr4RQ8GSLFW7TZiKp', + 'openimage_o': '1VUFXnB_z70uHfdgJG2E_pjYOcEgqM7tE', + 'imagenet_v2': '1akg2IiE22HcbvTBpwXQoD7tgfPCdkoho', + 'imagenet_r': '1EzjMN2gq-bVV7lg-MEAdeuBuz-7jbGYU', + 'imagenet_c': '1JeXL9YH4BO8gCJ631c5BHbaSsl-lekHt', + 'benchmark_imglist': '1XKzBdWCqg3vPoj-D32YixJyJJ0hL63gP' +} + +dir_dict = { + 'images_classic/': [ + 'cifar100', 'tin', 'tin597', 'svhn', 'cinic10', 'imagenet10', 'mnist', + 'fashionmnist', 'cifar10', 'cifar100c', 'places365', 'cifar10c', + 'fractals_and_fvis', 'usps', 'texture', 'notmnist' + ], + 'images_largescale/': [ + 'imagenet_1k', + 'ssb_hard', + 'ninco', + 'inaturalist', + 'places', + 'sun', + 'openimage_o', + 'imagenet_v2', + 'imagenet_c', + 'imagenet_r', + 'imagenet-21k-container', + ], + 'images_medical/': ['actmed', 'bimcv', 'ct', 'hannover', 'xraybone'], + 'bronze_ID_and_OOD/': ['bronze2'], +} + +benchmarks_dict = { + 'cifar10': + ['cifar10', 'cifar100', 'tin', 'mnist', 'svhn', 'texture', 'places365'], + 'cifar100': + ['cifar100', 'cifar10', 'tin', 'mnist', 'svhn', 'texture', 'places365'], + 'imagenet200': [ + 'imagenet_1k', 'ssb_hard', 'ninco', 'inaturalist', 'texture', + 'openimage_o', 'imagenet_v2', 'imagenet_c', 'imagenet_r' + ], + 'imagenet': [ + 'imagenet_1k', 'ssb_hard', 'ninco', 'inaturalist', 'texture', + 'openimage_o', 'imagenet_v2', 'imagenet_c', 'imagenet_r' + ], + 'bronze2': + ['bronze2', 'cifar10'], +} + + +def require_download(filename, path): + for item in os.listdir(path): + if item.startswith(filename) or filename.startswith( + item) or path.endswith(filename): + return False + + else: + print(filename + ' needs download:') + return True + + +def download_dataset(dataset, data_root): + for key in dir_dict.keys(): + if dataset in dir_dict[key]: + store_path = os.path.join(data_root, key, dataset) + if not os.path.exists(store_path): + os.makedirs(store_path) + break + else: + print('Invalid dataset detected {}'.format(dataset)) + return + + if require_download(dataset, store_path): + print(store_path) + if not store_path.endswith('/'): + store_path = store_path + '/' + gdown.download(id=download_id_dict[dataset], output=store_path) + + file_path = os.path.join(store_path, dataset + '.zip') + with zipfile.ZipFile(file_path, 'r') as zip_file: + zip_file.extractall(store_path) + os.remove(file_path) + + +def 
data_setup(data_root, id_data_name): + if not data_root.endswith('/'): + data_root = data_root + '/' + + if not os.path.exists(os.path.join(data_root, 'benchmark_imglist')): + gdown.download(id=download_id_dict['benchmark_imglist'], + output=data_root) + file_path = os.path.join(data_root, 'benchmark_imglist.zip') + with zipfile.ZipFile(file_path, 'r') as zip_file: + zip_file.extractall(data_root) + os.remove(file_path) + + for dataset in benchmarks_dict[id_data_name]: + download_dataset(dataset, data_root) + + +def get_id_ood_dataloader(id_name, data_root, preprocessor, **loader_kwargs): + if 'imagenet' in id_name: + if tvs_new: + if isinstance(preprocessor, + tvs.transforms._presets.ImageClassification): + mean, std = preprocessor.mean, preprocessor.std + elif isinstance(preprocessor, tvs.transforms.Compose): + temp = preprocessor.transforms[-1] + mean, std = temp.mean, temp.std + elif isinstance(preprocessor, BasePreprocessor): + temp = preprocessor.transform.transforms[-1] + mean, std = temp.mean, temp.std + else: + raise TypeError + else: + if isinstance(preprocessor, tvs.transforms.Compose): + temp = preprocessor.transforms[-1] + mean, std = temp.mean, temp.std + elif isinstance(preprocessor, BasePreprocessor): + temp = preprocessor.transform.transforms[-1] + mean, std = temp.mean, temp.std + else: + raise TypeError + imagenet_c_preprocessor = ImageNetCPreProcessor(mean, std) + + # weak augmentation for data_aux + test_standard_preprocessor = get_default_preprocessor(id_name) + + dataloader_dict = {} + data_info = DATA_INFO[id_name] + + # id + sub_dataloader_dict = {} + for split in data_info['id'].keys(): + if id_name == "bronze2": + dataset = BronzeWare_ID_Dataset(os.path.join(data_root, + data_info['id'][split]['data_dir']), + os.path.join(data_root, + data_info['id'][split]['xml_path']), + os.path.join(data_root, + data_info['id'][split]['imglist_path']), + input_transform=preprocessor) + else: + dataset = ImglistDataset( + name='_'.join((id_name, split)), + imglist_pth=os.path.join(data_root, + data_info['id'][split]['imglist_path']), + data_dir=os.path.join(data_root, + data_info['id'][split]['data_dir']), + num_classes=data_info['num_classes'], + preprocessor=preprocessor, + data_aux_preprocessor=test_standard_preprocessor) + dataloader = DataLoader(dataset, **loader_kwargs) + sub_dataloader_dict[split] = dataloader + dataloader_dict['id'] = sub_dataloader_dict + + # csid + sub_dataloader_dict = {} + for dataset_name in data_info['csid']['datasets']: + dataset = ImglistDataset( + name='_'.join((id_name, 'csid', dataset_name)), + imglist_pth=os.path.join( + data_root, data_info['csid'][dataset_name]['imglist_path']), + data_dir=os.path.join(data_root, + data_info['csid'][dataset_name]['data_dir']), + num_classes=data_info['num_classes'], + preprocessor=preprocessor + if dataset_name != 'imagenet_c' else imagenet_c_preprocessor, + data_aux_preprocessor=test_standard_preprocessor) + dataloader = DataLoader(dataset, **loader_kwargs) + sub_dataloader_dict[dataset_name] = dataloader + dataloader_dict['csid'] = sub_dataloader_dict + + # ood + dataloader_dict['ood'] = {} + for split in data_info['ood'].keys(): + split_config = data_info['ood'][split] + + if split == 'val': + # validation set + dataset = ImglistDataset( + name='_'.join((id_name, 'ood', split)), + imglist_pth=os.path.join(data_root, + split_config['imglist_path']), + data_dir=os.path.join(data_root, split_config['data_dir']), + num_classes=data_info['num_classes'], + preprocessor=preprocessor, + 
data_aux_preprocessor=test_standard_preprocessor) + dataloader = DataLoader(dataset, **loader_kwargs) + dataloader_dict['ood'][split] = dataloader + else: + # dataloaders for nearood, farood + sub_dataloader_dict = {} + for dataset_name in split_config['datasets']: + dataset_config = split_config[dataset_name] + dataset = ImglistDataset( + name='_'.join((id_name, 'ood', dataset_name)), + imglist_pth=os.path.join(data_root, + dataset_config['imglist_path']), + data_dir=os.path.join(data_root, + dataset_config['data_dir']), + num_classes=data_info['num_classes'], + preprocessor=preprocessor, + data_aux_preprocessor=test_standard_preprocessor) + dataloader = DataLoader(dataset, **loader_kwargs) + sub_dataloader_dict[dataset_name] = dataloader + dataloader_dict['ood'][split] = sub_dataloader_dict + + return dataloader_dict diff --git a/OpenOOD/openood/evaluation_api/evaluator.py b/OpenOOD/openood/evaluation_api/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..751d1a7ca41a714e839a75829b23ce38c3db97d4 --- /dev/null +++ b/OpenOOD/openood/evaluation_api/evaluator.py @@ -0,0 +1,446 @@ +from typing import Callable, List, Type + +import os +import numpy as np +import pandas as pd +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from tqdm import tqdm + +from openood.evaluators.metrics import compute_all_metrics +from openood.postprocessors import BasePostprocessor +from openood.networks.ash_net import ASHNet +from openood.networks.react_net import ReactNet +from openood.networks.scale_net import ScaleNet + +from .datasets import DATA_INFO, data_setup, get_id_ood_dataloader +from .postprocessor import get_postprocessor +from .preprocessor import get_default_preprocessor + + +class Evaluator: + def __init__( + self, + net: nn.Module, + id_name: str, + data_root: str = './data', + config_root: str = './configs', + preprocessor: Callable = None, + postprocessor_name: str = None, + postprocessor: Type[BasePostprocessor] = None, + batch_size: int = 200, + shuffle: bool = False, + num_workers: int = 4, + ) -> None: + """A unified, easy-to-use API for evaluating (most) discriminative OOD + detection methods. + + Args: + net (nn.Module): + The base classifier. + id_name (str): + The name of the in-distribution dataset. + data_root (str, optional): + The path of the data folder. Defaults to './data'. + config_root (str, optional): + The path of the config folder. Defaults to './configs'. + preprocessor (Callable, optional): + The preprocessor of input images. + Passing None will use the default preprocessor + following convention. Defaults to None. + postprocessor_name (str, optional): + The name of the postprocessor that obtains OOD score. + Ignored if an actual postprocessor is passed. + Defaults to None. + postprocessor (Type[BasePostprocessor], optional): + An actual postprocessor instance which inherits + OpenOOD's BasePostprocessor. Defaults to None. + batch_size (int, optional): + The batch size of samples. Defaults to 200. + shuffle (bool, optional): + Whether shuffling samples. Defaults to False. + num_workers (int, optional): + The num_workers argument that will be passed to + data loaders. Defaults to 4. + + Raises: + ValueError: + If both postprocessor_name and postprocessor are None. + ValueError: + If the specified ID dataset {id_name} is not supported. + TypeError: + If the passed postprocessor does not inherit BasePostprocessor. 
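+
+        Example:
+            A minimal usage sketch; the network class and dataset choice are
+            illustrative, and any trained classifier works::
+
+                net = ResNet18_32x32(num_classes=10)
+                evaluator = Evaluator(net, id_name='cifar10',
+                                      postprocessor_name='msp')
+                print(evaluator.eval_acc('id'))
+                evaluator.eval_ood()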
+ """ + # check the arguments + if postprocessor_name is None and postprocessor is None: + raise ValueError('Please pass postprocessor_name or postprocessor') + if postprocessor_name is not None and postprocessor is not None: + print( + 'Postprocessor_name is ignored because postprocessor is passed' + ) + if id_name not in DATA_INFO: + raise ValueError(f'Dataset [{id_name}] is not supported') + + # get data preprocessor + if preprocessor is None: + preprocessor = get_default_preprocessor(id_name) + + # set up config root + if config_root is None: + filepath = os.path.dirname(os.path.abspath(__file__)) + config_root = os.path.join(*filepath.split('/')[:-2], 'configs') + + # get postprocessor + if postprocessor is None: + postprocessor = get_postprocessor(config_root, postprocessor_name, + id_name) + if not isinstance(postprocessor, BasePostprocessor): + raise TypeError( + 'postprocessor should inherit BasePostprocessor in OpenOOD') + + # load data + data_setup(data_root, id_name) + loader_kwargs = { + 'batch_size': batch_size, + 'shuffle': shuffle, + 'num_workers': num_workers + } + dataloader_dict = get_id_ood_dataloader(id_name, data_root, + preprocessor, **loader_kwargs) + + # wrap base model to work with certain postprocessors + if postprocessor_name == 'react': + net = ReactNet(net) + elif postprocessor_name == 'ash': + net = ASHNet(net) + elif postprocessor_name == 'scale': + net = ScaleNet(net) + + # postprocessor setup + postprocessor.setup(net, dataloader_dict['id'], dataloader_dict['ood']) + + self.id_name = id_name + self.net = net + self.preprocessor = preprocessor + self.postprocessor = postprocessor + self.dataloader_dict = dataloader_dict + self.metrics = { + 'id_acc': None, + 'csid_acc': None, + 'ood': None, + 'fsood': None + } + self.scores = { + 'id': { + 'train': None, + 'val': None, + 'test': None + }, + 'csid': {k: None + for k in dataloader_dict['csid'].keys()}, + 'ood': { + 'val': None, + 'near': + {k: None + for k in dataloader_dict['ood']['near'].keys()}, + 'mid': {k: None + for k in dataloader_dict['ood']['mid'].keys()}, + 'far': {k: None + for k in dataloader_dict['ood']['far'].keys()}, + }, + 'id_preds': None, + 'id_labels': None, + 'csid_preds': {k: None + for k in dataloader_dict['csid'].keys()}, + 'csid_labels': {k: None + for k in dataloader_dict['csid'].keys()}, + } + # perform hyperparameter search if have not done so + if (self.postprocessor.APS_mode + and not self.postprocessor.hyperparam_search_done): + self.hyperparam_search() + + self.net.eval() + + # how to ensure the postprocessors can work with + # models whose definition doesn't align with OpenOOD + + def _classifier_inference(self, + data_loader: DataLoader, + msg: str = 'Acc Eval', + progress: bool = True): + self.net.eval() + + all_preds = [] + all_labels = [] + with torch.no_grad(): + for batch in tqdm(data_loader, desc=msg, disable=not progress): + data = batch['data'].cuda() + logits = self.net(data) + preds = logits.argmax(1) + all_preds.append(preds.cpu()) + all_labels.append(batch['label']) + + all_preds = torch.cat(all_preds) + all_labels = torch.cat(all_labels) + return all_preds, all_labels + + def eval_acc(self, data_name: str = 'id') -> float: + if data_name == 'id': + if self.metrics['id_acc'] is not None: + return self.metrics['id_acc'] + else: + if self.scores['id_preds'] is None: + all_preds, all_labels = self._classifier_inference( + self.dataloader_dict['id']['test'], 'ID Acc Eval') + self.scores['id_preds'] = all_preds + self.scores['id_labels'] = all_labels + else: + all_preds 
= self.scores['id_preds'] + all_labels = self.scores['id_labels'] + + assert len(all_preds) == len(all_labels) + correct = (all_preds == all_labels).sum().item() + acc = correct / len(all_labels) * 100 + self.metrics['id_acc'] = acc + return acc + elif data_name == 'csid': + if self.metrics['csid_acc'] is not None: + return self.metrics['csid_acc'] + else: + correct, total = 0, 0 + for _, (dataname, dataloader) in enumerate( + self.dataloader_dict['csid'].items()): + if self.scores['csid_preds'][dataname] is None: + all_preds, all_labels = self._classifier_inference( + dataloader, f'CSID {dataname} Acc Eval') + self.scores['csid_preds'][dataname] = all_preds + self.scores['csid_labels'][dataname] = all_labels + else: + all_preds = self.scores['csid_preds'][dataname] + all_labels = self.scores['csid_labels'][dataname] + + assert len(all_preds) == len(all_labels) + c = (all_preds == all_labels).sum().item() + t = len(all_labels) + correct += c + total += t + + if self.scores['id_preds'] is None: + all_preds, all_labels = self._classifier_inference( + self.dataloader_dict['id']['test'], 'ID Acc Eval') + self.scores['id_preds'] = all_preds + self.scores['id_labels'] = all_labels + else: + all_preds = self.scores['id_preds'] + all_labels = self.scores['id_labels'] + + correct += (all_preds == all_labels).sum().item() + total += len(all_labels) + + acc = correct / total * 100 + self.metrics['csid_acc'] = acc + return acc + else: + raise ValueError(f'Unknown data name {data_name}') + + def eval_ood(self, fsood: bool = False, progress: bool = True): + id_name = 'id' if not fsood else 'csid' + task = 'ood' if not fsood else 'fsood' + if self.metrics[task] is None: + self.net.eval() + + # id score + if self.scores['id']['test'] is None: + print(f'Performing inference on {self.id_name} test set...', + flush=True) + id_pred, id_conf, id_gt = self.postprocessor.inference( + self.net, self.dataloader_dict['id']['test'], progress) + self.scores['id']['test'] = [id_pred, id_conf, id_gt] + else: + id_pred, id_conf, id_gt = self.scores['id']['test'] + + if fsood: + csid_pred, csid_conf, csid_gt = [], [], [] + for i, dataset_name in enumerate(self.scores['csid'].keys()): + if self.scores['csid'][dataset_name] is None: + print( + f'Performing inference on {self.id_name} ' + f'(cs) test set [{i+1}]: {dataset_name}...', + flush=True) + temp_pred, temp_conf, temp_gt = \ + self.postprocessor.inference( + self.net, + self.dataloader_dict['csid'][dataset_name], + progress) + self.scores['csid'][dataset_name] = [ + temp_pred, temp_conf, temp_gt + ] + + csid_pred.append(self.scores['csid'][dataset_name][0]) + csid_conf.append(self.scores['csid'][dataset_name][1]) + csid_gt.append(self.scores['csid'][dataset_name][2]) + + csid_pred = np.concatenate(csid_pred) + csid_conf = np.concatenate(csid_conf) + csid_gt = np.concatenate(csid_gt) + + id_pred = np.concatenate((id_pred, csid_pred)) + id_conf = np.concatenate((id_conf, csid_conf)) + id_gt = np.concatenate((id_gt, csid_gt)) + + # load nearood data and compute ood metrics + near_metrics = self._eval_ood([id_pred, id_conf, id_gt], + ood_split='near', + progress=progress) + mid_metrics = self._eval_ood([id_pred, id_conf, id_gt], + ood_split='mid', + progress=progress) + # load farood data and compute ood metrics + far_metrics = self._eval_ood([id_pred, id_conf, id_gt], + ood_split='far', + progress=progress) + + if self.metrics[f'{id_name}_acc'] is None: + self.eval_acc(id_name) + near_metrics[:, -1] = np.array([self.metrics[f'{id_name}_acc']] * + len(near_metrics)) + 
mid_metrics[:, -1] = np.array([self.metrics[f'{id_name}_acc']] * + len(mid_metrics)) + far_metrics[:, -1] = np.array([self.metrics[f'{id_name}_acc']] * + len(far_metrics)) + + self.metrics[task] = pd.DataFrame( + np.concatenate([near_metrics, mid_metrics, far_metrics], axis=0), + index=list(self.dataloader_dict['ood']['near'].keys()) + + ['nearood'] + list(self.dataloader_dict['ood']['mid'].keys()) + + ['midood'] + list(self.dataloader_dict['ood']['far'].keys()) + + ['farood'], + columns=['FPR@95', 'AUROC', 'AUPR_IN', 'AUPR_OUT', 'ACC'], + ) + else: + print('Evaluation has already been done!') + + with pd.option_context( + 'display.max_rows', None, 'display.max_columns', None, + 'display.float_format', + '{:,.2f}'.format): # more options can be specified also + print(self.metrics[task]) + + return self.metrics[task] + + def _eval_ood(self, + id_list: List[np.ndarray], + ood_split: str = 'near', + progress: bool = True): + print(f'Processing {ood_split} ood...', flush=True) + [id_pred, id_conf, id_gt] = id_list + metrics_list = [] + for dataset_name, ood_dl in self.dataloader_dict['ood'][ + ood_split].items(): + if self.scores['ood'][ood_split][dataset_name] is None: + print(f'Performing inference on {dataset_name} dataset...', + flush=True) + ood_pred, ood_conf, ood_gt = self.postprocessor.inference( + self.net, ood_dl, progress) + self.scores['ood'][ood_split][dataset_name] = [ + ood_pred, ood_conf, ood_gt + ] + else: + print( + 'Inference has been performed on ' + f'{dataset_name} dataset...', + flush=True) + [ood_pred, ood_conf, + ood_gt] = self.scores['ood'][ood_split][dataset_name] + + ood_gt = -1 * np.ones_like(ood_gt) # hard set to -1 as ood + pred = np.concatenate([id_pred, ood_pred]) + conf = np.concatenate([id_conf, ood_conf]) + label = np.concatenate([id_gt, ood_gt]) + + print(f'Computing metrics on {dataset_name} dataset...') + ood_metrics = compute_all_metrics(conf, label, pred) + metrics_list.append(ood_metrics) + self._print_metrics(ood_metrics) + + print('Computing mean metrics...', flush=True) + metrics_list = np.array(metrics_list) + metrics_mean = np.mean(metrics_list, axis=0, keepdims=True) + self._print_metrics(list(metrics_mean[0])) + return np.concatenate([metrics_list, metrics_mean], axis=0) * 100 + + def _print_metrics(self, metrics): + [fpr, auroc, aupr_in, aupr_out, _] = metrics + + # print ood metric results + print('FPR@95: {:.2f}, AUROC: {:.2f}'.format(100 * fpr, 100 * auroc), + end=' ', + flush=True) + print('AUPR_IN: {:.2f}, AUPR_OUT: {:.2f}'.format( + 100 * aupr_in, 100 * aupr_out), + flush=True) + print(u'\u2500' * 70, flush=True) + print('', flush=True) + + def hyperparam_search(self): + print('Starting automatic parameter search...') + max_auroc = 0 + hyperparam_names = [] + hyperparam_list = [] + count = 0 + + for name in self.postprocessor.args_dict.keys(): + hyperparam_names.append(name) + count += 1 + + for name in hyperparam_names: + hyperparam_list.append(self.postprocessor.args_dict[name]) + + hyperparam_combination = self.recursive_generator( + hyperparam_list, count) + + final_index = None + for i, hyperparam in enumerate(hyperparam_combination): + self.postprocessor.set_hyperparam(hyperparam) + + id_pred, id_conf, id_gt = self.postprocessor.inference( + self.net, self.dataloader_dict['id']['val']) + ood_pred, ood_conf, ood_gt = self.postprocessor.inference( + self.net, self.dataloader_dict['ood']['val']) + + ood_gt = -1 * np.ones_like(ood_gt) # hard set to -1 as ood + pred = np.concatenate([id_pred, ood_pred]) + conf = np.concatenate([id_conf, 
ood_conf])
+            label = np.concatenate([id_gt, ood_gt])
+            ood_metrics = compute_all_metrics(conf, label, pred)
+            auroc = ood_metrics[1]
+
+            print('Hyperparam: {}, auroc: {}'.format(hyperparam, auroc))
+            if auroc > max_auroc:
+                final_index = i
+                max_auroc = auroc
+
+        self.postprocessor.set_hyperparam(hyperparam_combination[final_index])
+        print('Final hyperparam: {}'.format(
+            self.postprocessor.get_hyperparam()))
+        self.postprocessor.hyperparam_search_done = True
+
+    def recursive_generator(self, lst, n):
+        # `lst` rather than `list`, to avoid shadowing the builtin
+        if n == 1:
+            results = []
+            for x in lst[0]:
+                k = []
+                k.append(x)
+                results.append(k)
+            return results
+        else:
+            results = []
+            temp = self.recursive_generator(lst, n - 1)
+            for x in lst[n - 1]:
+                for y in temp:
+                    k = y.copy()
+                    k.append(x)
+                    results.append(k)
+            return results
diff --git a/OpenOOD/openood/evaluation_api/postprocessor.py b/OpenOOD/openood/evaluation_api/postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..accf438dc1a73172bc387017d30b67b5911837f6
--- /dev/null
+++ b/OpenOOD/openood/evaluation_api/postprocessor.py
@@ -0,0 +1,80 @@
+import os
+import urllib.request
+
+from openood.postprocessors import (
+    ASHPostprocessor, BasePostprocessor, ConfBranchPostprocessor, CutPastePostprocessor,
+    DICEPostprocessor, DRAEMPostprocessor, DropoutPostProcessor, DSVDDPostprocessor,
+    EBOPostprocessor, EnsemblePostprocessor, GMMPostprocessor, GodinPostprocessor,
+    GradNormPostprocessor, GRAMPostprocessor, KLMatchingPostprocessor, KNNPostprocessor,
+    MaxLogitPostprocessor, MCDPostprocessor, MDSPostprocessor, MDSEnsemblePostprocessor,
+    MOSPostprocessor, ODINPostprocessor, OpenGanPostprocessor, OpenMax, PatchcorePostprocessor,
+    Rd4adPostprocessor, ReactPostprocessor, ResidualPostprocessor, ScalePostprocessor,
+    SSDPostprocessor, TemperatureScalingPostprocessor, VIMPostprocessor, RotPredPostprocessor,
+    RankFeatPostprocessor, RMDSPostprocessor, SHEPostprocessor, CIDERPostprocessor, NPOSPostprocessor,
+    GENPostprocessor, NNGuidePostprocessor, RelationPostprocessor, BronzeNet2Postprocessor)
+from openood.utils.config import Config, merge_configs
+
+postprocessors = {
+    'ash': ASHPostprocessor,
+    'cider': CIDERPostprocessor,
+    'conf_branch': ConfBranchPostprocessor,
+    'msp': BasePostprocessor,
+    'ebo': EBOPostprocessor,
+    'odin': ODINPostprocessor,
+    'mds': MDSPostprocessor,
+    'mds_ensemble': MDSEnsemblePostprocessor,
+    'npos': NPOSPostprocessor,
+    'rmds': RMDSPostprocessor,
+    'gmm': GMMPostprocessor,
+    'patchcore': PatchcorePostprocessor,
+    'openmax': OpenMax,
+    'react': ReactPostprocessor,
+    'vim': VIMPostprocessor,
+    'gradnorm': GradNormPostprocessor,
+    'godin': GodinPostprocessor,
+    'gram': GRAMPostprocessor,
+    'cutpaste': CutPastePostprocessor,
+    'mls': MaxLogitPostprocessor,
+    'residual': ResidualPostprocessor,
+    'klm': KLMatchingPostprocessor,
+    'temp_scaling': TemperatureScalingPostprocessor,
+    'ensemble': EnsemblePostprocessor,
+    'dropout': DropoutPostProcessor,
+    'draem': DRAEMPostprocessor,
+    'dsvdd': DSVDDPostprocessor,
+    'mos': MOSPostprocessor,
+    'mcd': MCDPostprocessor,
+    'opengan': OpenGanPostprocessor,
+    'knn': KNNPostprocessor,
+    'dice': DICEPostprocessor,
+    'scale': ScalePostprocessor,
+    'ssd': SSDPostprocessor,
+    'she': SHEPostprocessor,
+    'rd4ad': Rd4adPostprocessor,
+    'rotpred': RotPredPostprocessor,
+    'rankfeat': RankFeatPostprocessor,
+    'gen': GENPostprocessor,
+    'nnguide': NNGuidePostprocessor,
+    'relation': RelationPostprocessor,
+
+    'BronzeNet2': BronzeNet2Postprocessor
+}
+
+link_prefix =
'https://raw.githubusercontent.com/Jingkang50/OpenOOD/main/configs/postprocessors/' + + +def get_postprocessor(config_root: str, postprocessor_name: str, id_data_name: str): + postprocessor_config_path = os.path.join(config_root, 'postprocessors', + f'{postprocessor_name}.yml') + if not os.path.exists(postprocessor_config_path): + os.makedirs(os.path.dirname(postprocessor_config_path), exist_ok=True) + urllib.request.urlretrieve(link_prefix + f'{postprocessor_name}.yml', + postprocessor_config_path) + + config = Config(postprocessor_config_path) + config = merge_configs(config, Config(**{'dataset': {'name': id_data_name}})) + postprocessor = postprocessors[postprocessor_name](config) + postprocessor.APS_mode = config.postprocessor.APS_mode + postprocessor.hyperparam_search_done = False + return postprocessor diff --git a/OpenOOD/openood/evaluation_api/preprocessor.py b/OpenOOD/openood/evaluation_api/preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..f5b737fdd8eae528853fd6a70de24d55bb6e2d22 --- /dev/null +++ b/OpenOOD/openood/evaluation_api/preprocessor.py @@ -0,0 +1,84 @@ +import torchvision.transforms as tvs_trans + +from openood.preprocessors import BasePreprocessor +from openood.utils import Config + +INTERPOLATION = tvs_trans.InterpolationMode.BILINEAR + +default_preprocessing_dict = { + 'cifar10': { + 'pre_size': 32, + 'img_size': 32, + 'normalization': [[0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]], + }, + 'cifar100': { + 'pre_size': 32, + 'img_size': 32, + 'normalization': [[0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]], + }, + 'imagenet': { + 'pre_size': 256, + 'img_size': 224, + 'normalization': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]], + }, + 'imagenet200': { + 'pre_size': 256, + 'img_size': 224, + 'normalization': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]], + }, + 'aircraft': { + 'pre_size': 512, + 'img_size': 448, + 'normalization': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], + }, + 'cub': { + 'pre_size': 512, + 'img_size': 448, + 'normalization': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], + }, + 'bronze2': { + 'pre_size': 420, + 'img_size': 400, + 'normalization': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], + } +} + + +class Convert: + def __init__(self, mode='RGB'): + self.mode = mode + + def __call__(self, image): + return image.convert(self.mode) + + +class TestStandardPreProcessor(BasePreprocessor): + """For test and validation dataset standard image transformation.""" + def __init__(self, config: Config): + self.transform = tvs_trans.Compose([ + Convert('RGB'), + tvs_trans.Resize(config.pre_size, interpolation=INTERPOLATION), + tvs_trans.CenterCrop(config.img_size), + tvs_trans.ToTensor(), + tvs_trans.Normalize(*config.normalization), + ]) + + +class ImageNetCPreProcessor(BasePreprocessor): + def __init__(self, mean, std): + self.transform = tvs_trans.Compose([ + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean, std), + ]) + + +def get_default_preprocessor(data_name: str): + # TODO: include fine-grained datasets proposed in Vaze et al.? 
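+    # with an int argument, tvs_trans.Resize scales the shorter image side to
+    # pre_size, and CenterCrop then takes an img_size x img_size patch; e.g.
+    # the 'imagenet' entry resizes to 256 and crops 224x224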
+
+    if data_name not in default_preprocessing_dict:
+        raise NotImplementedError(f'The dataset {data_name} is not supported')
+
+    config = Config(**default_preprocessing_dict[data_name])
+    preprocessor = TestStandardPreProcessor(config)
+
+    return preprocessor
diff --git a/OpenOOD/openood/evaluators/__init__.py b/OpenOOD/openood/evaluators/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d5105ba17eda20cffc4e3f8e395106450e8b157
--- /dev/null
+++ b/OpenOOD/openood/evaluators/__init__.py
@@ -0,0 +1 @@
+from .utils import get_evaluator
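ADEvaluator below derives an image-level AUROC by scoring the ID and OOD loaders with the postprocessor and integrating the ROC curve; the same number can be sanity-checked in one call with sklearn's roc_auc_score, as in this standalone sketch (the confidence values are illustrative):

import numpy as np
from sklearn.metrics import roc_auc_score

id_conf = np.array([0.9, 0.8, 0.6])   # higher confidence = more in-distribution
ood_conf = np.array([0.5, 0.2])
conf = np.concatenate([id_conf, ood_conf])
ind_indicator = np.concatenate([np.ones_like(id_conf), np.zeros_like(ood_conf)])
print(roc_auc_score(ind_indicator, conf))  # 1.0 here: ID and OOD separate perfectly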
diff --git a/OpenOOD/openood/evaluators/ad_evaluator.py b/OpenOOD/openood/evaluators/ad_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b1bc648790b09a1a4ac84a8b39a300927f08c5f
--- /dev/null
+++ b/OpenOOD/openood/evaluators/ad_evaluator.py
@@ -0,0 +1,59 @@
+import numpy as np
+import torch
+from sklearn.metrics import auc, roc_curve
+
+from openood.utils import Config
+
+
+class ADEvaluator():
+    def __init__(self, config: Config):
+        self.config = config
+
+    def eval_ood(self,
+                 net,
+                 id_data_loader,
+                 ood_data_loaders,
+                 postprocessor,
+                 epoch_idx: int = -1):
+        with torch.no_grad():
+            if type(net) is dict:
+                for subnet in net.values():
+                    subnet.eval()
+            else:
+                net.eval()
+            auroc = self.get_auroc(net, id_data_loader['test'],
+                                   ood_data_loaders['val'], postprocessor)
+            metrics = {
+                'epoch_idx': epoch_idx,
+                'image_auroc': auroc,
+            }
+            return metrics
+
+    def report(self, test_metrics):
+        print('Complete Evaluation:\n'
+              '{}\n'
+              '==============================\n'
+              'AUC Image: {:.2f} \n'
+              '=============================='.format(
+                  self.config.dataset.name,
+                  100.0 * test_metrics['image_auroc']),
+              flush=True)
+        print('Completed!', flush=True)
+
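+    # image-level AUROC with ID samples (label != -1) as the positive class;
+    # confidences from both loaders are pooled before the ROC sweep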
+    def get_auroc(self, net, id_data_loader, ood_data_loader, postprocessor):
+        _, id_conf,
id_gt = postprocessor.inference(net, id_data_loader) + _, ood_conf, ood_gt = postprocessor.inference(net, ood_data_loader) + ood_gt = -1 * np.ones_like(ood_gt) # hard set to -1 as ood + + conf = np.concatenate([id_conf, ood_conf]) + label = np.concatenate([id_gt, ood_gt]) + + ind_indicator = np.zeros_like(label) + ind_indicator[label != -1] = 1 + + fpr, tpr, _ = roc_curve(ind_indicator, conf) + + auroc = auc(fpr, tpr) + + return auroc diff --git a/OpenOOD/openood/evaluators/arpl_evaluator.py b/OpenOOD/openood/evaluators/arpl_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..25a8ffc005ba4236078e9331b1e12a0116479778 --- /dev/null +++ b/OpenOOD/openood/evaluators/arpl_evaluator.py @@ -0,0 +1,139 @@ +from typing import Dict + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from tqdm import tqdm + +from openood.postprocessors import BasePostprocessor +from openood.utils import Config + +from .ood_evaluator import OODEvaluator + + +class ARPLEvaluator(OODEvaluator): + def __init__(self, config: Config): + self.config = config + + def eval_acc(self, + net: dict, + data_loader: DataLoader, + postprocessor: BasePostprocessor = None, + epoch_idx: int = -1, + fsood: bool = False, + csid_data_loaders: Dict[str, DataLoader] = None): + criterion = net['criterion'] + net = net['netF'] + net.eval() + + loss_avg = 0.0 + correct = 0 + with torch.no_grad(): + for batch in tqdm(data_loader, + desc='Eval: ', + position=0, + leave=True): + # prepare data + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + _, feat = net(data, return_feature=True) + output, loss = criterion(feat, target) + + # accuracy + pred = output.data.max(1)[1] + correct += pred.eq(target.data).sum().item() + + # test loss average + loss_avg += float(loss.data) + + if not fsood: + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg / len(data_loader) + metrics['acc'] = correct / len(data_loader.dataset) + return metrics + else: + all_correct = 0 + all_total = 0 + all_correct += correct + all_total += len(data_loader.dataset) + + assert csid_data_loaders is not None + for dataset_name, csid_dl in csid_data_loaders.items(): + correct = 0 + with torch.no_grad(): + for batch in tqdm(csid_dl, + desc='Eval: ', + position=0, + leave=True): + # prepare data + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + _, feat = net(data, return_feature=True) + output, loss = criterion(feat, target) + + # accuracy + pred = output.data.max(1)[1] + correct += pred.eq(target.data).sum().item() + + all_correct += correct + all_total += len(csid_dl.dataset) + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['acc'] = all_correct / all_total + return metrics + + def eval_ood(self, + net: dict, + id_data_loader: DataLoader, + ood_data_loaders: Dict[str, Dict[str, DataLoader]], + postprocessor: BasePostprocessor, + fsood: bool = False): + criterion = net['criterion'] + net = net['netF'] + net = nn.Sequential( + net, + criterion, + ) + net.eval() + # load training in-distribution data + assert 'test' in id_data_loader, \ + 'id_data_loaders should have the key: test!' 
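# A toy sketch of the AUROC computation in ADEvaluator.get_auroc above,
# with hypothetical confidence values (the postprocessor is expected to
# give ID samples higher confidence than OOD samples, whose labels are
# hard-set to -1):
#
#     conf = np.array([0.9, 0.8, 0.3, 0.2])     # two ID, two OOD samples
#     label = np.array([5, 2, -1, -1])          # -1 marks OOD
#     ind_indicator = (label != -1).astype(int)
#     fpr, tpr, _ = roc_curve(ind_indicator, conf)
#     auc(fpr, tpr)                             # -> 1.0, perfect separation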
+ dataset_name = self.config.dataset.name + print(f'Performing inference on {dataset_name} dataset...', flush=True) + id_pred, id_conf, id_gt = postprocessor.inference( + net, id_data_loader['test']) + if self.config.recorder.save_scores: + self._save_scores(id_pred, id_conf, id_gt, dataset_name) + + if fsood: + # load csid data and compute confidence + for dataset_name, csid_dl in ood_data_loaders['csid'].items(): + print(f'Performing inference on {dataset_name} dataset...', + flush=True) + csid_pred, csid_conf, csid_gt = postprocessor.inference( + net, csid_dl) + if self.config.recorder.save_scores: + self._save_scores(csid_pred, csid_conf, csid_gt, + dataset_name) + id_pred = np.concatenate([id_pred, csid_pred]) + id_conf = np.concatenate([id_conf, csid_conf]) + id_gt = np.concatenate([id_gt, csid_gt]) + + # load nearood data and compute ood metrics + self._eval_ood(net, [id_pred, id_conf, id_gt], + ood_data_loaders, + postprocessor, + ood_split='nearood') + + # load farood data and compute ood metrics + self._eval_ood(net, [id_pred, id_conf, id_gt], + ood_data_loaders, + postprocessor, + ood_split='farood') diff --git a/OpenOOD/openood/evaluators/base_evaluator.py b/OpenOOD/openood/evaluators/base_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e1e4420cdd1d95c9062c0e5a04159525d0b9c5 --- /dev/null +++ b/OpenOOD/openood/evaluators/base_evaluator.py @@ -0,0 +1,99 @@ +import os + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.postprocessors import BasePostprocessor +from openood.utils import Config + + +def to_np(x): + return x.data.cpu().numpy() + + +class BaseEvaluator: + def __init__(self, config: Config): + self.config = config + + def eval_acc(self, + net: nn.Module, + data_loader: DataLoader, + postprocessor: BasePostprocessor = None, + epoch_idx: int = -1): + net.eval() + + loss_avg = 0.0 + correct = 0 + with torch.no_grad(): + for batch in tqdm(data_loader, + desc='Eval: ', + position=0, + leave=True, + disable=not comm.is_main_process()): + # prepare data + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + output = net(data) + + loss = F.cross_entropy(output, target) + + # accuracy + pred = output.data.max(1)[1] + correct += pred.eq(target.data).sum().item() + + # test loss average + loss_avg += float(loss.data) + + loss = loss_avg / len(data_loader) + acc = correct / len(data_loader.dataset) + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss) + metrics['acc'] = self.save_metrics(acc) + return metrics + + def extract(self, + net: nn.Module, + data_loader: DataLoader, + filename: str = 'feature'): + net.eval() + feat_list, label_list = [], [] + + with torch.no_grad(): + for batch in tqdm(data_loader, + desc='Feature Extracting: ', + position=0, + leave=True, + disable=not comm.is_main_process()): + data = batch['data'].cuda() + label = batch['label'] + + _, feat = net(data, return_feature=True) + feat_list.extend(to_np(feat)) + label_list.extend(to_np(label)) + + feat_list = np.array(feat_list) + label_list = np.array(label_list) + + save_dir = self.config.output_dir + os.makedirs(save_dir, exist_ok=True) + np.savez(os.path.join(save_dir, filename), + feat_list=feat_list, + label_list=label_list) + + def save_metrics(self, value): + all_values = comm.gather(value) + temp = 0 + for i in all_values: + temp = temp + i + # 
total_value = np.add([x for x in all_values])
+
+        return temp
diff --git a/OpenOOD/openood/evaluators/ece_evaluator.py b/OpenOOD/openood/evaluators/ece_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..de62e0ab3755ff87ee775bd3188a0cd3a1bddc0a
--- /dev/null
+++ b/OpenOOD/openood/evaluators/ece_evaluator.py
@@ -0,0 +1,108 @@
+import os
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+import openood.utils.comm as comm
+from openood.postprocessors import BasePostprocessor
+from openood.utils import Config
+from .base_evaluator import BaseEvaluator
+
+
+class ECEEvaluator(BaseEvaluator):
+    def __init__(self, config: Config):
+        """ECE Evaluator.
+
+        Args:
+            config (Config): parsed config.
+        """
+        super(ECEEvaluator, self).__init__(config)
+
+    def eval_acc(self,
+                 net: nn.Module,
+                 data_loader: DataLoader,
+                 postprocessor: BasePostprocessor = None,
+                 epoch_idx: int = -1,
+                 num_bins: int = 15):
+        """Calculates ECE.
+        Args:
+            num_bins: the number of bins used to partition all samples (15 by default).
+        Returns:
+            ece: the calculated ECE value.
+        """
+        net.eval()
+
+        loss_avg = 0.0
+        correct = 0
+        total_scores = []
+        total_preds = []
+        total_labels = []
+        with torch.no_grad():
+            for batch in tqdm(data_loader,
+                              desc='Eval: ',
+                              position=0,
+                              leave=True):
+                # prepare data
+                data = batch['data'].cuda()
+                target = batch['label'].cuda()
+
+                # forward
+                output = net(data)
+                loss = F.cross_entropy(output, target)
+
+                # accuracy
+                pred = output.data.max(1)[1]
+                # use the softmax confidence, which lies in [0, 1];
+                # raw max logits would fall outside the bins below
+                score = F.softmax(output, dim=1).max(1)[0]
+                correct += pred.eq(target.data).sum().item()
+
+                # test loss average
+                loss_avg += float(loss.data)
+
+                total_preds.append(pred.cpu().numpy().reshape(-1))
+                total_scores.append(score.cpu().numpy().reshape(-1))
+                total_labels.append(target.data.cpu().numpy().reshape(-1))
+
+        # batches can differ in size, so concatenate instead of reshaping
+        scores_np = np.concatenate(total_scores)
+        preds_np = np.concatenate(total_preds)
+        labels_np = np.concatenate(total_labels)
+        acc_tab = np.zeros(num_bins)  # Empirical (true) confidence
+        mean_conf = np.zeros(num_bins)  # Predicted confidence
+        nb_items_bin = np.zeros(num_bins)  # Number of items in the bins
+        tau_tab = np.linspace(0, 1, num_bins + 1)  # Confidence bins
+        for i in np.arange(num_bins):  # Iterates over the bins
+            # Selects the items where the predicted max probability falls in the bin
+            # [tau_tab[i], tau_tab[i + 1])
+            sec = (tau_tab[i + 1] > scores_np) & (scores_np >= tau_tab[i])
+            nb_items_bin[i] = np.sum(sec)  # Number of items in the bin
+            # Selects the predicted classes, and the true classes
+            class_pred_sec, y_sec = preds_np[sec], labels_np[sec]
+            # Averages of the predicted max probabilities
+            mean_conf[i] = np.mean(
+                scores_np[sec]) if nb_items_bin[i] > 0 else np.nan
+            # Computes the empirical confidence
+            acc_tab[i] = np.mean(
+                class_pred_sec == y_sec) if nb_items_bin[i] > 0 else np.nan
+        # Cleaning
+        mean_conf = mean_conf[nb_items_bin > 0]
+        acc_tab = acc_tab[nb_items_bin > 0]
+        nb_items_bin = nb_items_bin[nb_items_bin > 0]
+        if sum(nb_items_bin) != 0:
+            ece = np.average(
+                np.absolute(mean_conf - acc_tab),
+                weights=nb_items_bin.astype(float) / np.sum(nb_items_bin))
+        else:
+            ece = 0.0
+
+        loss = loss_avg / len(data_loader)
+        acc = correct / len(data_loader.dataset)
+
+        metrics = {}
+        metrics['epoch_idx'] = epoch_idx
+        metrics['loss'] = self.save_metrics(loss)
+        metrics['acc'] = self.save_metrics(acc)
+        metrics['ece'] = self.save_metrics(ece)
+        return metrics
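The binning loop in eval_acc above is standard equal-width ECE. As a minimal, self-contained sketch (toy values, independent of the evaluator class), the same quantity can be computed as:

import numpy as np

# hypothetical softmax confidences, predictions, and ground-truth labels
scores = np.array([0.95, 0.90, 0.85, 0.60, 0.55, 0.30])
preds = np.array([0, 1, 2, 1, 0, 2])
labels = np.array([0, 1, 2, 0, 0, 1])

num_bins = 3
tau = np.linspace(0, 1, num_bins + 1)
ece = 0.0
for i in range(num_bins):
    # samples whose confidence falls in [tau[i], tau[i + 1])
    sec = (scores >= tau[i]) & (scores < tau[i + 1])
    if sec.sum() == 0:
        continue
    mean_conf = scores[sec].mean()                # average predicted confidence
    bin_acc = (preds[sec] == labels[sec]).mean()  # empirical accuracy
    ece += abs(mean_conf - bin_acc) * sec.sum() / len(scores)

print(f'ECE = {ece:.3f}')  # 0 only when confidence matches accuracy

diff --git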
a/OpenOOD/openood/evaluators/fsood_evaluator.py b/OpenOOD/openood/evaluators/fsood_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..89eb8c9db96c363d9391642fd7c9e698bd36dbfe --- /dev/null +++ b/OpenOOD/openood/evaluators/fsood_evaluator.py @@ -0,0 +1,104 @@ +import csv +import os +from typing import Dict, List + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import DataLoader + +from openood.postprocessors import BasePostprocessor + +from .ood_evaluator import OODEvaluator + + +class FSOODEvaluator(OODEvaluator): + def eval_csid_acc(self, net: nn.Module, + csid_loaders: Dict[str, Dict[str, DataLoader]]): + # ensure the networks in eval mode + net.eval() + for dataset_name, csid_dl in csid_loaders.items(): + print(f'Computing accuracy on {dataset_name} dataset...') + correct = 0 + with torch.no_grad(): + for batch in csid_dl: + data = batch['data'].cuda() + target = batch['label'].cuda() + # forward + output = net(data) + # accuracy + pred = output.data.max(1)[1] + correct += pred.eq(target.data).sum().item() + acc = correct / len(csid_dl.dataset) + if self.config.recorder.save_csv: + self._save_acc_results(acc, dataset_name) + print(u'\u2500' * 70, flush=True) + + def _save_acc_results(self, acc, dataset_name): + write_content = { + 'dataset': dataset_name, + 'FPR@95': '-', + 'AUROC': '-', + 'AUPR_IN': '-', + 'AUPR_OUT': '-', + 'ACC': '{:.2f}'.format(100 * acc), + } + fieldnames = list(write_content.keys()) + # print csid metric results + print('CSID[{}] accuracy: {:.2f}%'.format(dataset_name, 100 * acc), + flush=True) + csv_path = os.path.join(self.config.output_dir, 'csid.csv') + if not os.path.exists(csv_path): + with open(csv_path, 'w', newline='') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + writer.writerow(write_content) + else: + with open(csv_path, 'a', newline='') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writerow(write_content) + + def eval_ood(self, net: nn.Module, id_data_loader: List[DataLoader], + ood_data_loaders: List[DataLoader], + postprocessor: BasePostprocessor): + # ensure the networks in eval mode + net.eval() + # load training in-distribution data + assert 'test' in id_data_loader, \ + 'id_data_loaders should have the key: test!' 
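# Note on the FSOOD protocol implemented below: covariate-shifted ID
# (csid) splits are still treated as in-distribution, so their
# predictions, confidences, and labels are concatenated onto the ID
# arrays before the nearood/farood metrics are computed, while
# eval_csid_acc reports classification accuracy on each csid split
# separately.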
+ dataset_name = self.config.dataset.name + print(f'Performing inference on {dataset_name} dataset...', flush=True) + id_pred, id_conf, id_gt = postprocessor.inference( + net, id_data_loader['test']) + if self.config.recorder.save_scores: + self._save_scores(id_pred, id_conf, id_gt, dataset_name) + + # load csid data and compute confidence + for dataset_name, csid_dl in ood_data_loaders['csid'].items(): + print(f'Performing inference on {dataset_name} dataset...', + flush=True) + csid_pred, csid_conf, csid_gt = postprocessor.inference( + net, csid_dl) + if self.config.recorder.save_scores: + self._save_scores(csid_pred, csid_conf, csid_gt, dataset_name) + id_pred = np.concatenate([id_pred, csid_pred]) + id_conf = np.concatenate([id_conf, csid_conf]) + id_gt = np.concatenate([id_gt, csid_gt]) + + # compute accuracy on csid + print(u'\u2500' * 70, flush=True) + self.eval_csid_acc(net, ood_data_loaders['csid']) + + # load nearood data and compute ood metrics + print(u'\u2500' * 70, flush=True) + self._eval_ood(net, [id_pred, id_conf, id_gt], + ood_data_loaders, + postprocessor, + ood_split='nearood') + + # load farood data and compute ood metrics + print(u'\u2500' * 70, flush=True) + self._eval_ood(net, [id_pred, id_conf, id_gt], + ood_data_loaders, + postprocessor, + ood_split='farood') diff --git a/OpenOOD/openood/evaluators/metrics.py b/OpenOOD/openood/evaluators/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9e5501d52a4b55e9e8883cd799114c88f4173290 --- /dev/null +++ b/OpenOOD/openood/evaluators/metrics.py @@ -0,0 +1,115 @@ +import numpy as np +from sklearn import metrics + + +def compute_all_metrics(conf, label, pred): + np.set_printoptions(precision=3) + recall = 0.95 + auroc, aupr_in, aupr_out, fpr = auc_and_fpr_recall(conf, label, recall) + + accuracy = acc(pred, label) + + results = [fpr, auroc, aupr_in, aupr_out, accuracy] + + return results + + +# accuracy +def acc(pred, label): + ind_pred = pred[label != -1] + ind_label = label[label != -1] + + num_tp = np.sum(ind_pred == ind_label) + acc = num_tp / len(ind_label) + + return acc + + +# fpr_recall +def fpr_recall(conf, label, tpr): + gt = np.ones_like(label) + gt[label == -1] = 0 + + fpr_list, tpr_list, threshold_list = metrics.roc_curve(gt, conf) + fpr = fpr_list[np.argmax(tpr_list >= tpr)] + thresh = threshold_list[np.argmax(tpr_list >= tpr)] + return fpr, thresh + + +# auc +def auc_and_fpr_recall(conf, label, tpr_th): + # following convention in ML we treat OOD as positive + ood_indicator = np.zeros_like(label) + ood_indicator[label == -1] = 1 + + # in the postprocessor we assume ID samples will have larger + # "conf" values than OOD samples + # therefore here we need to negate the "conf" values + fpr_list, tpr_list, thresholds = metrics.roc_curve(ood_indicator, -conf) + fpr = fpr_list[np.argmax(tpr_list >= tpr_th)] + + precision_in, recall_in, thresholds_in \ + = metrics.precision_recall_curve(1 - ood_indicator, conf) + + precision_out, recall_out, thresholds_out \ + = metrics.precision_recall_curve(ood_indicator, -conf) + + auroc = metrics.auc(fpr_list, tpr_list) + aupr_in = metrics.auc(recall_in, precision_in) + aupr_out = metrics.auc(recall_out, precision_out) + + return auroc, aupr_in, aupr_out, fpr + + +# ccr_fpr +def ccr_fpr(conf, fpr, pred, label): + ind_conf = conf[label != -1] + ind_pred = pred[label != -1] + ind_label = label[label != -1] + + ood_conf = conf[label == -1] + + num_ind = len(ind_conf) + num_ood = len(ood_conf) + + fp_num = int(np.ceil(fpr * num_ood)) + thresh = 
np.sort(ood_conf)[-fp_num]
+    num_tp = np.sum((ind_conf > thresh) * (ind_pred == ind_label))
+    ccr = num_tp / num_ind
+
+    return ccr
+
+
+def detection(ind_confidences,
+              ood_confidences,
+              n_iter=100000,
+              return_data=False):
+    # calculate the minimum detection error
+    Y1 = ood_confidences
+    X1 = ind_confidences
+
+    start = np.min([np.min(X1), np.min(Y1)])
+    end = np.max([np.max(X1), np.max(Y1)])
+    gap = (end - start) / n_iter
+
+    best_error = 1.0
+    best_delta = None
+    all_thresholds = []
+    all_errors = []
+    for delta in np.arange(start, end, gap):
+        # fraction of ID samples rejected at this threshold
+        error1 = np.sum(X1 < delta) / float(len(X1))
+        # fraction of OOD samples accepted at this threshold
+        error2 = np.sum(Y1 > delta) / float(len(Y1))
+        detection_error = (error1 + error2) / 2.0
+
+        if return_data:
+            all_thresholds.append(delta)
+            all_errors.append(detection_error)
+
+        if detection_error < best_error:
+            best_error = detection_error
+            best_delta = delta
+
+    if return_data:
+        return best_error, best_delta, all_errors, all_thresholds
+    else:
+        return best_error, best_delta
diff --git a/OpenOOD/openood/evaluators/mos_evaluator.py b/OpenOOD/openood/evaluators/mos_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0cd48de3255c5187a6cbd89e1e7dd4720ca126c
--- /dev/null
+++ b/OpenOOD/openood/evaluators/mos_evaluator.py
@@ -0,0 +1,377 @@
+import csv
+import os
+from typing import Dict, List
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+import openood.utils.comm as comm
+from openood.postprocessors import BasePostprocessor
+from openood.utils import Config
+
+from .base_evaluator import BaseEvaluator
+from .metrics import compute_all_metrics
+
+
+def topk(output, target, ks=(1, )):
+    """Returns one boolean vector for each k, whether the target is within the
+    output's top-k."""
+    _, pred = output.topk(max(ks), 1, True, True)
+    pred = pred.t()
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
+    return [correct[:k].max(0)[0] for k in ks]
+
+
+def get_group_slices(classes_per_group):
+    group_slices = []
+    start = 0
+    for num_cls in classes_per_group:
+        end = start + num_cls + 1
+        group_slices.append([start, end])
+        start = end
+    return torch.LongTensor(group_slices)
+
+
+def cal_ood_score(logits, group_slices):
+    num_groups = group_slices.shape[0]
+
+    all_group_ood_score_MOS = []
+    for i in range(num_groups):
+        group_logit = logits[:, group_slices[i][0]:group_slices[i][1]]
+
+        group_softmax = F.softmax(group_logit, dim=-1)
+        group_others_score = group_softmax[:, 0]
+
+        all_group_ood_score_MOS.append(-group_others_score)
+
+    all_group_ood_score_MOS = torch.stack(all_group_ood_score_MOS, dim=1)
+    final_max_score_MOS, _ = torch.max(all_group_ood_score_MOS, dim=1)
+    return final_max_score_MOS.data.cpu().numpy()
+
+
+def iterate_data(data_loader, model, group_slices):
+    confs_mos = []
+    dataiter = iter(data_loader)
+
+    with torch.no_grad():
+        for _ in tqdm(range(1,
+                            len(dataiter) + 1),
+                      desc='Batches',
+                      position=0,
+                      leave=True,
+                      disable=not comm.is_main_process()):
+            batch = next(dataiter)
+            data = batch['data'].cuda()
+
+            logits = model(data)
+            conf_mos = cal_ood_score(logits, group_slices)
+            confs_mos.extend(conf_mos)
+
+    return np.array(confs_mos)
+
+
+def calc_group_softmax_acc(logits, labels, group_slices):
+    num_groups = group_slices.shape[0]
+    loss = 0
+    num_samples = logits.shape[0]
+
+    all_group_max_score, all_group_max_class = [], []
+
+    smax = torch.nn.Softmax(dim=-1).cuda()
+    cri =
torch.nn.CrossEntropyLoss(reduction='none').cuda() + + for i in range(num_groups): + group_logit = logits[:, group_slices[i][0]:group_slices[i][1]] + group_label = labels[:, i] + loss += cri(group_logit, group_label) + + group_softmax = smax(group_logit) + group_softmax = group_softmax[:, 1:] # disregard others category + group_max_score, group_max_class = torch.max(group_softmax, dim=1) + group_max_class += 1 # shift the class index by 1 + + all_group_max_score.append(group_max_score) + all_group_max_class.append(group_max_class) + + all_group_max_score = torch.stack(all_group_max_score, dim=1) + all_group_max_class = torch.stack(all_group_max_class, dim=1) + + final_max_score, max_group = torch.max(all_group_max_score, dim=1) + + pred_cls_within_group = all_group_max_class[torch.arange(num_samples), + max_group] + + gt_class, gt_group = torch.max(labels, dim=1) + + selected_groups = (max_group == gt_group) + + pred_acc = torch.zeros(logits.shape[0]).bool().cuda() + + pred_acc[selected_groups] = ( + pred_cls_within_group[selected_groups] == gt_class[selected_groups]) + + return loss, pred_acc + + +def run_eval_acc(model, data_loader, group_slices, num_group): + # switch to evaluate mode + model.eval() + + print('Running validation...') + + all_c, all_top1 = [], [] + + train_dataiter = iter(data_loader) + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Test: ', + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + data = batch['data'].cuda() + group_label = batch['group_label'].cuda() + class_label = batch['class_label'].cuda() + labels = [] + for i in range(len(group_label)): + label = torch.zeros(num_group, dtype=torch.int64) + label[group_label[i]] = class_label[i] + 1 + labels.append(label.unsqueeze(0)) + labels = torch.cat(labels, dim=0).cuda() + + with torch.no_grad(): + # compute output, measure accuracy and record loss. + logits = model(data) + if group_slices is not None: + c, top1 = calc_group_softmax_acc(logits, labels, group_slices) + else: + c = torch.nn.CrossEntropyLoss(reduction='none')(logits, labels) + top1 = topk(logits, labels, ks=(1, ))[0] + + all_c.extend(c.cpu()) # Also ensures a sync point. 
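# Toy illustration of the group-softmax machinery above (hypothetical
# numbers): classes_per_group = [2, 3] makes get_group_slices return
# [[0, 3], [3, 7]], i.e. each group's logits carry an extra 'others'
# entry at index 0. cal_ood_score takes -softmax(group_logit)[:, 0] per
# group and then the max over groups, so an ID sample (low 'others'
# probability in its own group) scores near 0 while an OOD sample (high
# 'others' probability in every group) scores near -1.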
+        all_top1.extend(top1.cpu())
+
+    model.train()
+    # all_c is val loss
+    # all_top1 is val top1 acc
+    return all_c, all_top1
+
+
+class MOSEvaluator(BaseEvaluator):
+    def __init__(self, config: Config):
+        super(MOSEvaluator, self).__init__(config)
+        self.config = config
+        self.num_groups = None
+        self.group_slices = None
+        self.acc = None
+
+    def cal_group_slices(self, train_loader):
+        config = self.config
+        # if specified group_config
+        if (config.trainer.group_config.endswith('npy')):
+            classes_per_group = np.load(config.trainer.group_config)
+        elif (config.trainer.group_config.endswith('txt')):
+            classes_per_group = np.loadtxt(config.trainer.group_config,
+                                           dtype=int)
+        else:
+            # otherwise, derive the group config from the training labels
+            group = {}
+            train_dataiter = iter(train_loader)
+            for _ in tqdm(range(1,
+                                len(train_dataiter) + 1),
+                          desc='cal group_config',
+                          position=0,
+                          leave=True,
+                          disable=not comm.is_main_process()):
+                batch = next(train_dataiter)
+                group_label = batch['group_label']
+                class_label = batch['class_label']
+
+                for i in range(len(class_label)):
+                    gl = group_label[i].item()
+                    cl = class_label[i].item()
+
+                    if str(gl) not in group:
+                        group[str(gl)] = []
+
+                    if cl not in group[str(gl)]:
+                        group[str(gl)].append(cl)
+
+            classes_per_group = []
+            for i in range(len(group)):
+                classes_per_group.append(max(group[str(i)]) + 1)
+
+        self.num_groups = len(classes_per_group)
+        self.group_slices = get_group_slices(classes_per_group)
+        self.group_slices = self.group_slices.cuda()
+
+    def eval_ood(self,
+                 net: nn.Module,
+                 id_data_loader: DataLoader,
+                 ood_data_loaders: Dict[str, Dict[str, DataLoader]],
+                 postprocessor=None,
+                 fsood=False):
+        net.eval()
+        if self.group_slices is None or self.num_groups is None:
+            self.cal_group_slices(id_data_loader['train'])
+        dataset_name = self.config.dataset.name
+
+        print(f'Performing inference on {dataset_name} dataset...', flush=True)
+        id_conf = iterate_data(id_data_loader['test'], net, self.group_slices)
+        # dummy pred and gt
+        # the accuracy will be handled by self.eval_acc
+        id_pred = np.zeros_like(id_conf)
+        id_gt = np.zeros_like(id_conf)
+
+        if fsood:
+            # load csid data and compute confidence
+            for dataset_name, csid_dl in ood_data_loaders['csid'].items():
+                print(f'Performing inference on {dataset_name} dataset...',
+                      flush=True)
+                csid_conf = iterate_data(csid_dl, net, self.group_slices)
+                # dummy pred and gt
+                # the accuracy will be handled by self.eval_acc
+                csid_pred = np.zeros_like(csid_conf)
+                csid_gt = np.zeros_like(csid_conf)
+                if self.config.recorder.save_scores:
+                    self._save_scores(csid_pred, csid_conf, csid_gt,
+                                      dataset_name)
+                id_pred = np.concatenate([id_pred, csid_pred])
+                id_conf = np.concatenate([id_conf, csid_conf])
+                id_gt = np.concatenate([id_gt, csid_gt])
+
+        # load nearood data and compute ood metrics
+        self._eval_ood(net, [id_pred, id_conf, id_gt],
+                       ood_data_loaders,
+                       ood_split='nearood')
+        # load farood data and compute ood metrics
+        self._eval_ood(net, [id_pred, id_conf, id_gt],
+                       ood_data_loaders,
+                       ood_split='farood')
+
+    def _eval_ood(self,
+                  net: nn.Module,
+                  id_list: List[np.ndarray],
+                  ood_data_loaders: Dict[str, Dict[str, DataLoader]],
+                  ood_split: str = 'nearood'):
+        print(f'Processing {ood_split}...', flush=True)
+        [id_pred, id_conf, id_gt] = id_list
+        metrics_list = []
+        for dataset_name, ood_dl in ood_data_loaders[ood_split].items():
+            print(f'Performing inference on {dataset_name} dataset...',
+                  flush=True)
+            ood_conf = iterate_data(ood_dl, net, self.group_slices)
+            ood_gt = -1 *
np.ones_like(ood_conf) # hard set to -1 as ood + # dummy pred + ood_pred = np.zeros_like(ood_conf) + if self.config.recorder.save_scores: + self._save_scores(ood_pred, ood_conf, ood_gt, dataset_name) + + pred = np.concatenate([id_pred, ood_pred]) + conf = np.concatenate([id_conf, ood_conf]) + label = np.concatenate([id_gt, ood_gt]) + + print(f'Computing metrics on {dataset_name} dataset...') + + ood_metrics = compute_all_metrics(conf, label, pred) + # the acc here is not reliable + # since we use dummy pred and gt for id samples + # so we use the acc computed by self.eval_acc + ood_metrics[-1] = self.acc + + if self.config.recorder.save_csv: + self._save_csv(ood_metrics, dataset_name=dataset_name) + metrics_list.append(ood_metrics) + + print('Computing mean metrics...', flush=True) + metrics_list = np.array(metrics_list) + metrics_mean = np.mean(metrics_list, axis=0) + if self.config.recorder.save_csv: + self._save_csv(metrics_mean, dataset_name=ood_split) + + def _save_csv(self, metrics, dataset_name): + [fpr, auroc, aupr_in, aupr_out, accuracy] = metrics + + write_content = { + 'dataset': dataset_name, + 'FPR@95': '{:.2f}'.format(100 * fpr), + 'AUROC': '{:.2f}'.format(100 * auroc), + 'AUPR_IN': '{:.2f}'.format(100 * aupr_in), + 'AUPR_OUT': '{:.2f}'.format(100 * aupr_out), + 'ACC': '{:.2f}'.format(100 * accuracy) + } + + fieldnames = list(write_content.keys()) + + # print ood metric results + print('FPR@95: {:.2f}, AUROC: {:.2f}'.format(100 * fpr, 100 * auroc), + end=' ', + flush=True) + print('AUPR_IN: {:.2f}, AUPR_OUT: {:.2f}'.format( + 100 * aupr_in, 100 * aupr_out), + flush=True) + print('ACC: {:.2f}'.format(accuracy * 100), flush=True) + print(u'\u2500' * 70, flush=True) + + csv_path = os.path.join(self.config.output_dir, 'ood.csv') + if not os.path.exists(csv_path): + with open(csv_path, 'w', newline='') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + writer.writerow(write_content) + else: + with open(csv_path, 'a', newline='') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writerow(write_content) + + def _save_scores(self, pred, conf, gt, save_name): + save_dir = os.path.join(self.config.output_dir, 'scores') + os.makedirs(save_dir, exist_ok=True) + np.savez(os.path.join(save_dir, save_name), + pred=pred, + conf=conf, + label=gt) + + def eval_acc(self, + net: nn.Module, + data_loader: DataLoader, + postprocessor: BasePostprocessor = None, + epoch_idx: int = -1, + num_groups: int = None, + group_slices: torch.Tensor = None, + fsood: bool = False, + csid_data_loaders: DataLoader = None): + net.eval() + if num_groups is None or group_slices is None: + self.cal_group_slices(data_loader) + else: + self.num_groups = num_groups + self.group_slices = group_slices.cuda() + + loss, top1 = run_eval_acc(net, data_loader, self.group_slices, + self.num_groups) + + if fsood: + assert csid_data_loaders is not None + for dataset_name, csid_dl in csid_data_loaders.items(): + _, temp = run_eval_acc(net, csid_dl, self.group_slices, + self.num_groups) + top1.extend(temp) + + metrics = {} + metrics['acc'] = np.mean(top1) + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = np.mean(loss) + self.acc = metrics['acc'] + + return metrics + + def report(self, test_metrics): + print('Completed!', flush=True) diff --git a/OpenOOD/openood/evaluators/ood_evaluator.py b/OpenOOD/openood/evaluators/ood_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..67991a208b0540c74451181d70eb2758afb937a0 --- /dev/null 
+++ b/OpenOOD/openood/evaluators/ood_evaluator.py
@@ -0,0 +1,289 @@
+import csv
+import os
+from typing import Dict, List
+
+import numpy as np
+import torch.nn as nn
+from torch.utils.data import DataLoader
+
+from openood.postprocessors import BasePostprocessor
+from openood.utils import Config
+
+from .base_evaluator import BaseEvaluator
+from .metrics import compute_all_metrics
+
+
+class OODEvaluator(BaseEvaluator):
+    def __init__(self, config: Config):
+        """OOD Evaluator.
+
+        Args:
+            config (Config): parsed config.
+        """
+        super(OODEvaluator, self).__init__(config)
+        self.id_pred = None
+        self.id_conf = None
+        self.id_gt = None
+
+    def eval_ood(self,
+                 net: nn.Module,
+                 id_data_loaders: Dict[str, DataLoader],
+                 ood_data_loaders: Dict[str, Dict[str, DataLoader]],
+                 postprocessor: BasePostprocessor,
+                 fsood: bool = False):
+        if type(net) is dict:
+            for subnet in net.values():
+                subnet.eval()
+        else:
+            net.eval()
+        assert 'test' in id_data_loaders, \
+            'id_data_loaders should have the key: test!'
+        dataset_name = self.config.dataset.name
+
+        if self.config.postprocessor.APS_mode:
+            assert 'val' in id_data_loaders
+            assert 'val' in ood_data_loaders
+            self.hyperparam_search(net, id_data_loaders['val'],
+                                   ood_data_loaders['val'], postprocessor)
+
+        print(f'Performing inference on {dataset_name} dataset...', flush=True)
+        id_pred, id_conf, id_gt = postprocessor.inference(
+            net, id_data_loaders['test'])
+        if self.config.recorder.save_scores:
+            self._save_scores(id_pred, id_conf, id_gt, dataset_name)
+
+        if fsood:
+            # load csid data and compute confidence
+            for dataset_name, csid_dl in ood_data_loaders['csid'].items():
+                print(f'Performing inference on {dataset_name} dataset...',
+                      flush=True)
+                csid_pred, csid_conf, csid_gt = postprocessor.inference(
+                    net, csid_dl)
+                if self.config.recorder.save_scores:
+                    self._save_scores(csid_pred, csid_conf, csid_gt,
+                                      dataset_name)
+                id_pred = np.concatenate([id_pred, csid_pred])
+                id_conf = np.concatenate([id_conf, csid_conf])
+                id_gt = np.concatenate([id_gt, csid_gt])
+
+        # load nearood data and compute ood metrics
+        print(u'\u2500' * 70, flush=True)
+        self._eval_ood(net, [id_pred, id_conf, id_gt],
+                       ood_data_loaders,
+                       postprocessor,
+                       ood_split='nearood')
+
+        # load midood data and compute ood metrics
+        print(u'\u2500' * 70, flush=True)
+        self._eval_ood(net, [id_pred, id_conf, id_gt],
+                       ood_data_loaders,
+                       postprocessor,
+                       ood_split='midood')
+
+        # load farood data and compute ood metrics
+        print(u'\u2500' * 70, flush=True)
+        self._eval_ood(net, [id_pred, id_conf, id_gt],
+                       ood_data_loaders,
+                       postprocessor,
+                       ood_split='farood')
+
+    def _eval_ood(self,
+                  net: nn.Module,
+                  id_list: List[np.ndarray],
+                  ood_data_loaders: Dict[str, Dict[str, DataLoader]],
+                  postprocessor: BasePostprocessor,
+                  ood_split: str = 'nearood'):
+        print(f'Processing {ood_split}...', flush=True)
+        [id_pred, id_conf, id_gt] = id_list
+        metrics_list = []
+        for dataset_name, ood_dl in ood_data_loaders[ood_split].items():
+            print(f'Performing inference on {dataset_name} dataset...',
+                  flush=True)
+            ood_pred, ood_conf, ood_gt = postprocessor.inference(net, ood_dl)
+            ood_gt = -1 * np.ones_like(ood_gt)  # hard set to -1 as ood
+            if self.config.recorder.save_scores:
+                self._save_scores(ood_pred, ood_conf, ood_gt, dataset_name)
+
+            pred = np.concatenate([id_pred, ood_pred])
+            conf = np.concatenate([id_conf, ood_conf])
+            label = np.concatenate([id_gt, ood_gt])
+
+            print(f'Computing metrics on {dataset_name} dataset...')
+
+            ood_metrics = compute_all_metrics(conf, label, pred)
+            if
self.config.recorder.save_csv: + self._save_csv(ood_metrics, dataset_name=dataset_name) + metrics_list.append(ood_metrics) + + print('Computing mean metrics...', flush=True) + metrics_list = np.array(metrics_list) + metrics_mean = np.mean(metrics_list, axis=0) + if self.config.recorder.save_csv: + self._save_csv(metrics_mean, dataset_name=ood_split) + + def eval_ood_val(self, net: nn.Module, id_data_loaders: Dict[str, + DataLoader], + ood_data_loaders: Dict[str, DataLoader], + postprocessor: BasePostprocessor): + if type(net) is dict: + for subnet in net.values(): + subnet.eval() + else: + net.eval() + assert 'val' in id_data_loaders + assert 'val' in ood_data_loaders + if self.config.postprocessor.APS_mode: + val_auroc = self.hyperparam_search(net, id_data_loaders['val'], + ood_data_loaders['val'], + postprocessor) + else: + id_pred, id_conf, id_gt = postprocessor.inference( + net, id_data_loaders['val']) + ood_pred, ood_conf, ood_gt = postprocessor.inference( + net, ood_data_loaders['val']) + ood_gt = -1 * np.ones_like(ood_gt) # hard set to -1 as ood + pred = np.concatenate([id_pred, ood_pred]) + conf = np.concatenate([id_conf, ood_conf]) + label = np.concatenate([id_gt, ood_gt]) + ood_metrics = compute_all_metrics(conf, label, pred) + val_auroc = ood_metrics[1] + return {'auroc': 100 * val_auroc} + + def _save_csv(self, metrics, dataset_name): + [fpr, auroc, aupr_in, aupr_out, accuracy] = metrics + + write_content = { + 'dataset': dataset_name, + 'FPR@95': '{:.2f}'.format(100 * fpr), + 'AUROC': '{:.2f}'.format(100 * auroc), + 'AUPR_IN': '{:.2f}'.format(100 * aupr_in), + 'AUPR_OUT': '{:.2f}'.format(100 * aupr_out), + 'ACC': '{:.2f}'.format(100 * accuracy) + } + + fieldnames = list(write_content.keys()) + + # print ood metric results + print('FPR@95: {:.2f}, AUROC: {:.2f}'.format(100 * fpr, 100 * auroc), + end=' ', + flush=True) + print('AUPR_IN: {:.2f}, AUPR_OUT: {:.2f}'.format( + 100 * aupr_in, 100 * aupr_out), + flush=True) + print('ACC: {:.2f}'.format(accuracy * 100), flush=True) + print(u'\u2500' * 70, flush=True) + + csv_path = os.path.join(self.config.output_dir, 'ood.csv') + if not os.path.exists(csv_path): + with open(csv_path, 'w', newline='') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + writer.writerow(write_content) + else: + with open(csv_path, 'a', newline='') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writerow(write_content) + + def _save_scores(self, pred, conf, gt, save_name): + save_dir = os.path.join(self.config.output_dir, 'scores') + os.makedirs(save_dir, exist_ok=True) + np.savez(os.path.join(save_dir, save_name), + pred=pred, + conf=conf, + label=gt) + + def eval_acc(self, + net: nn.Module, + data_loader: DataLoader, + postprocessor: BasePostprocessor = None, + epoch_idx: int = -1, + fsood: bool = False, + csid_data_loaders: DataLoader = None): + """Returns the accuracy score of the labels and predictions. 
+ + :return: float + """ + if type(net) is dict: + net['backbone'].eval() + else: + net.eval() + self.id_pred, self.id_conf, self.id_gt = postprocessor.inference( + net, data_loader) + + if fsood: + assert csid_data_loaders is not None + for dataset_name, csid_dl in csid_data_loaders.items(): + csid_pred, csid_conf, csid_gt = postprocessor.inference( + net, csid_dl) + self.id_pred = np.concatenate([self.id_pred, csid_pred]) + self.id_conf = np.concatenate([self.id_conf, csid_conf]) + self.id_gt = np.concatenate([self.id_gt, csid_gt]) + + metrics = {} + metrics['acc'] = sum(self.id_pred == self.id_gt) / len(self.id_pred) + metrics['epoch_idx'] = epoch_idx + return metrics + + def report(self, test_metrics): + print('Completed!', flush=True) + + def hyperparam_search( + self, + net: nn.Module, + id_data_loader, + ood_data_loader, + postprocessor: BasePostprocessor, + ): + print('Starting automatic parameter search...') + aps_dict = {} + max_auroc = 0 + hyperparam_names = [] + hyperparam_list = [] + count = 0 + for name in postprocessor.args_dict.keys(): + hyperparam_names.append(name) + count += 1 + for name in hyperparam_names: + hyperparam_list.append(postprocessor.args_dict[name]) + hyperparam_combination = self.recursive_generator( + hyperparam_list, count) + for hyperparam in hyperparam_combination: + postprocessor.set_hyperparam(hyperparam) + id_pred, id_conf, id_gt = postprocessor.inference( + net, id_data_loader) + ood_pred, ood_conf, ood_gt = postprocessor.inference( + net, ood_data_loader) + ood_gt = -1 * np.ones_like(ood_gt) # hard set to -1 as ood + pred = np.concatenate([id_pred, ood_pred]) + conf = np.concatenate([id_conf, ood_conf]) + label = np.concatenate([id_gt, ood_gt]) + ood_metrics = compute_all_metrics(conf, label, pred) + index = hyperparam_combination.index(hyperparam) + aps_dict[index] = ood_metrics[1] + print('Hyperparam:{}, auroc:{}'.format(hyperparam, + aps_dict[index])) + if ood_metrics[1] > max_auroc: + max_auroc = ood_metrics[1] + for key in aps_dict.keys(): + if aps_dict[key] == max_auroc: + postprocessor.set_hyperparam(hyperparam_combination[key]) + print('Final hyperparam: {}'.format(postprocessor.get_hyperparam())) + return max_auroc + + def recursive_generator(self, list, n): + if n == 1: + results = [] + for x in list[0]: + k = [] + k.append(x) + results.append(k) + return results + else: + results = [] + temp = self.recursive_generator(list, n - 1) + for x in list[n - 1]: + for y in temp: + k = y.copy() + k.append(x) + results.append(k) + return results diff --git a/OpenOOD/openood/evaluators/osr_evaluator.py b/OpenOOD/openood/evaluators/osr_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..897281d6f942cba0d98b3c843172a27fdca57e80 --- /dev/null +++ b/OpenOOD/openood/evaluators/osr_evaluator.py @@ -0,0 +1,39 @@ +from typing import Dict + +import torch.nn as nn +from torch.utils.data import DataLoader + +from openood.postprocessors import BasePostprocessor +from openood.utils import Config + +from .ood_evaluator import OODEvaluator + + +class OSREvaluator(OODEvaluator): + def __init__(self, config: Config): + super(OSREvaluator, self).__init__(config) + + def eval_ood(self, net: nn.Module, id_data_loader: DataLoader, + ood_data_loaders: Dict[str, Dict[str, DataLoader]], + postprocessor: BasePostprocessor): + if type(net) is dict: + for subnet in net.values(): + subnet.eval() + else: + net.eval() + + # load training in-distribution data + assert 'test' in id_data_loader, \ + 'id_data_loaders should have the key: test!' 
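# For reference, the automatic parameter search inherited from
# OODEvaluator above enumerates the full hyperparameter grid: with a
# hypothetical args_dict = {'temperature': [1, 10], 'noise': [0.0014,
# 0.002]}, recursive_generator produces [[1, 0.0014], [10, 0.0014],
# [1, 0.002], [10, 0.002]], and set_hyperparam keeps the combination
# with the best validation AUROC.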
+        dataset_name = self.config.dataset.name
+        print(f'Performing inference on {dataset_name} dataset...', flush=True)
+        id_pred, id_conf, id_gt = postprocessor.inference(
+            net, id_data_loader['test'])
+        if self.config.recorder.save_scores:
+            self._save_scores(id_pred, id_conf, id_gt, dataset_name)
+
+        # load osr data and compute ood metrics
+        self._eval_ood(net, [id_pred, id_conf, id_gt],
+                       ood_data_loaders,
+                       postprocessor,
+                       ood_split='osr')
diff --git a/OpenOOD/openood/evaluators/patchcore_evaluator.py b/OpenOOD/openood/evaluators/patchcore_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca0c087de02233f3f634baf53d3e78084a5fd286
--- /dev/null
+++ b/OpenOOD/openood/evaluators/patchcore_evaluator.py
@@ -0,0 +1,111 @@
+import os
+from typing import Dict
+
+import cv2
+import numpy as np
+import torch
+import torch.nn as nn
+from PIL import Image
+from scipy.ndimage import gaussian_filter
+from sklearn.metrics import roc_auc_score
+from torch.utils.data import DataLoader
+from torchvision import transforms
+
+from openood.postprocessors import BasePostprocessor
+from openood.utils import Config
+
+from .base_evaluator import BaseEvaluator
+
+
+class PatchCoreEvaluator(BaseEvaluator):
+    def __init__(self, config: Config):
+        super(PatchCoreEvaluator, self).__init__(config)
+        self.config = config
+
+    def eval_ood(self, net: nn.Module, id_data_loader: DataLoader,
+                 ood_data_loaders: Dict[str, Dict[str, DataLoader]],
+                 postprocessor: BasePostprocessor):
+        net.eval()
+
+        dataset_name = self.config.dataset.name
+        print(f'Performing inference on {dataset_name} dataset...', flush=True)
+        id_pred, id_conf, id_gt = postprocessor.inference(
+            net, ood_data_loaders['val'])  # anomalous ('not good') samples
+        good_pred, good_conf, good_gt = postprocessor.inference(
+            net, id_data_loader['test'])  # normal ('good') samples
+
+        # pred = np.concatenate([id_pred, good_pred])
+        conf = np.concatenate([id_conf, good_conf])
+        gt = np.concatenate([id_gt, good_gt])
+
+        self.gt_transform = transforms.Compose([
+            transforms.Resize((256, 256)),
+            transforms.ToTensor(),
+            transforms.CenterCrop(224)
+        ])
+        mean_train = [0.485, 0.456, 0.406]
+        std_train = [0.229, 0.224, 0.225]
+        self.transform = transforms.Compose([
+            # LANCZOS is the same filter; ANTIALIAS was removed in Pillow 10
+            transforms.Resize((256, 256), Image.LANCZOS),
+            transforms.ToTensor(),
+            transforms.CenterCrop(224),
+            transforms.Normalize(mean=mean_train, std=std_train)
+        ])
+        count = 0
+        self.gt_list_px_lvl = []
+
+        for batch in id_data_loader['trainGT']:
+            # data = batch['data'].cuda()
+            data = []
+            label = batch['label'].cuda()
+            name = batch['image_name']
+            for i in name:
+                path = os.path.join('./data/images/', i)
+                gt_img = Image.open(path)
+                gt_img = self.gt_transform(gt_img)
+                gt_img = torch.unsqueeze(gt_img, 0)
+
+                # gt_img = self.gt_transform(gt_img)
+                gt_np = gt_img.cpu().numpy()[0, 0].astype(int)
+                self.gt_list_px_lvl.extend(gt_np.ravel())
+
+        self.pred_list_px_lvl = []
+        self.pred_list_img_lvl = []
+
+        for patchscore in conf:
+
+            anomaly_map = patchscore[:, 0].reshape((28, 28))
+            N_b = patchscore[np.argmax(patchscore[:, 0])]
+            w = (1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b))))
+            score = w * max(patchscore[:, 0])  # Image-level score
+
+            anomaly_map_resized = cv2.resize(anomaly_map, (224, 224))
+            anomaly_map_resized_blur = gaussian_filter(anomaly_map_resized,
+                                                       sigma=4)
+            self.pred_list_px_lvl.extend(anomaly_map_resized_blur.ravel())
+            self.pred_list_img_lvl.append(score)
+
+        print('Total image-level auc-roc score :')
+        img_auc = roc_auc_score(gt, self.pred_list_img_lvl)
+        print(img_auc)
+
+        # NOTE: 'test_pix' was undefined in the original file; read it as an
+        # optional flag (assumed config key) so this branch cannot crash
+        test_pix = getattr(self.config.evaluator, 'test_pix', False)
+        if test_pix:
print('Total pixel-level auc-roc score :') + pixel_auc = roc_auc_score(self.gt_list_px_lvl, + self.pred_list_px_lvl) + print(pixel_auc) + + def eval_acc(self, + net: nn.Module, + data_loader: DataLoader, + postprocessor: BasePostprocessor = None, + epoch_idx: int = -1): + net.eval() + id_pred, _, id_gt = postprocessor.inference(net, data_loader) + metrics = {} + metrics['acc'] = sum(id_pred == id_gt) / len(id_pred) + metrics['epoch_idx'] = epoch_idx + return metrics + + def report(self, test_metrics): + print('Completed!', flush=True) diff --git a/OpenOOD/openood/evaluators/utils.py b/OpenOOD/openood/evaluators/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..02e9d74186c306da820e45620511fa3963d63755 --- /dev/null +++ b/OpenOOD/openood/evaluators/utils.py @@ -0,0 +1,26 @@ +from openood.evaluators.mos_evaluator import MOSEvaluator +from openood.utils import Config + +from .ad_evaluator import ADEvaluator +from .arpl_evaluator import ARPLEvaluator +from .base_evaluator import BaseEvaluator +from .ece_evaluator import ECEEvaluator +from .fsood_evaluator import FSOODEvaluator +from .ood_evaluator import OODEvaluator +from .osr_evaluator import OSREvaluator +from .patchcore_evaluator import PatchCoreEvaluator + + +def get_evaluator(config: Config): + evaluators = { + 'base': BaseEvaluator, + 'ood': OODEvaluator, + 'fsood': FSOODEvaluator, + 'patch': PatchCoreEvaluator, + 'arpl': ARPLEvaluator, + 'ad': ADEvaluator, + 'mos': MOSEvaluator, + 'ece': ECEEvaluator, + 'osr': OSREvaluator + } + return evaluators[config.evaluator.name](config) diff --git a/OpenOOD/openood/losses/__init__.py b/OpenOOD/openood/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7d18808fb7ffe906850c8c921af8ba50d22798b0 --- /dev/null +++ b/OpenOOD/openood/losses/__init__.py @@ -0,0 +1,3 @@ +from .draem_loss import get_draem_losses +from .reweight import rew_ce, rew_sce +from .sce import soft_cross_entropy diff --git a/OpenOOD/openood/losses/__pycache__/__init__.cpython-311.pyc b/OpenOOD/openood/losses/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39538f798af480cacb0db5ecb6bc33cda036a74e Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/__init__.cpython-311.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/__init__.cpython-37.pyc b/OpenOOD/openood/losses/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa9523079d309add1e324d2f0f58f13d6b16312f Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/__init__.cpython-37.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/draem_loss.cpython-311.pyc b/OpenOOD/openood/losses/__pycache__/draem_loss.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9b8ef58541967d79d0b95c8d23c6a0544bc0679 Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/draem_loss.cpython-311.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/draem_loss.cpython-37.pyc b/OpenOOD/openood/losses/__pycache__/draem_loss.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ceed98fed1898211c5bbfcb8bc492a0cc8c5a9d Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/draem_loss.cpython-37.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/focal.cpython-311.pyc b/OpenOOD/openood/losses/__pycache__/focal.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a190d9607ffa33421c1efaa97d722fc6858c63de Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/focal.cpython-311.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/focal.cpython-37.pyc b/OpenOOD/openood/losses/__pycache__/focal.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c2c081f83e29c5ffef7c9dcca764dfd36814e86 Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/focal.cpython-37.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/kdad_losses.cpython-311.pyc b/OpenOOD/openood/losses/__pycache__/kdad_losses.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbf6b56d1dc08d24a78559a90ff90d9071d5d77e Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/kdad_losses.cpython-311.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/kdad_losses.cpython-37.pyc b/OpenOOD/openood/losses/__pycache__/kdad_losses.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd1368e3666128578437979e386c22a312edcb4 Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/kdad_losses.cpython-37.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/rd4ad_loss.cpython-311.pyc b/OpenOOD/openood/losses/__pycache__/rd4ad_loss.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81db38854c2d86aa5c48d8f2183b69c95997a4a0 Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/rd4ad_loss.cpython-311.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/rd4ad_loss.cpython-37.pyc b/OpenOOD/openood/losses/__pycache__/rd4ad_loss.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bc25d8979d073982b9c20015ce5d9a357b467ab Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/rd4ad_loss.cpython-37.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/reweight.cpython-311.pyc b/OpenOOD/openood/losses/__pycache__/reweight.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3623b7e5964b2cca0d7b5704992fc269afe3ce4d Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/reweight.cpython-311.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/reweight.cpython-37.pyc b/OpenOOD/openood/losses/__pycache__/reweight.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f854ce211b4a92ceb5ce5aa76644485aa48a86eb Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/reweight.cpython-37.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/sce.cpython-311.pyc b/OpenOOD/openood/losses/__pycache__/sce.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cbb91e80be575ffca2d62778e309fe38cae3174 Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/sce.cpython-311.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/sce.cpython-37.pyc b/OpenOOD/openood/losses/__pycache__/sce.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beaeab7c69547fde2485a1a173fb1105f7cb29f0 Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/sce.cpython-37.pyc differ diff --git a/OpenOOD/openood/losses/__pycache__/ssim.cpython-311.pyc b/OpenOOD/openood/losses/__pycache__/ssim.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b30e7ee8d8a6c696de0aa484dfcd70627c6e6953 Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/ssim.cpython-311.pyc differ diff 
--git a/OpenOOD/openood/losses/__pycache__/ssim.cpython-37.pyc b/OpenOOD/openood/losses/__pycache__/ssim.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dfcbcd0388d3fa7b7bed4b7de24030b1d8a67cd Binary files /dev/null and b/OpenOOD/openood/losses/__pycache__/ssim.cpython-37.pyc differ diff --git a/OpenOOD/openood/losses/draem_loss.py b/OpenOOD/openood/losses/draem_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..06b07bf9207a8ec6214c931d9ed7247ca8cfee30 --- /dev/null +++ b/OpenOOD/openood/losses/draem_loss.py @@ -0,0 +1,13 @@ +import torch + +from .focal import FocalLoss +from .ssim import SSIM + + +def get_draem_losses(): + losses = { + 'l2': torch.nn.modules.loss.MSELoss(), + 'ssim': SSIM(), + 'focal': FocalLoss() + } + return losses diff --git a/OpenOOD/openood/losses/focal.py b/OpenOOD/openood/losses/focal.py new file mode 100644 index 0000000000000000000000000000000000000000..efe2a51220f2451969fb4e2bea5954dd0ed7c6ad --- /dev/null +++ b/OpenOOD/openood/losses/focal.py @@ -0,0 +1,79 @@ +import numpy as np +import torch +import torch.nn as nn + + +class FocalLoss(nn.Module): + def __init__(self, + apply_nonlin=None, + alpha=None, + gamma=2, + balance_index=0, + smooth=1e-5, + size_average=True): + super(FocalLoss, self).__init__() + self.apply_nonlin = apply_nonlin + self.alpha = alpha + self.gamma = gamma + self.balance_index = balance_index + self.smooth = smooth + self.size_average = size_average + + if self.smooth is not None: + if self.smooth < 0 or self.smooth > 1.0: + raise ValueError('smooth value should be in [0,1]') + + def forward(self, logit, target): + if self.apply_nonlin is not None: + logit = self.apply_nonlin(logit) + num_class = logit.shape[1] + + if logit.dim() > 2: + # N,C,d1,d2 -> N,C,m (m=d1*d2*...) 
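# (the three views below keep the class dim, move it last, then collapse
# all remaining dims so each row is a single prediction; the target is
# flattened to shape (-1, 1) to match)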
+ logit = logit.view(logit.size(0), logit.size(1), -1) + logit = logit.permute(0, 2, 1).contiguous() + logit = logit.view(-1, logit.size(-1)) + target = torch.squeeze(target, 1) + target = target.view(-1, 1) + alpha = self.alpha + + if alpha is None: + alpha = torch.ones(num_class, 1) + elif isinstance(alpha, (list, np.ndarray)): + assert len(alpha) == num_class + alpha = torch.FloatTensor(alpha).view(num_class, 1) + alpha = alpha / alpha.sum() + elif isinstance(alpha, float): + alpha = torch.ones(num_class, 1) + alpha = alpha * (1 - self.alpha) + alpha[self.balance_index] = self.alpha + + else: + raise TypeError('Not support alpha type') + + if alpha.device != logit.device: + alpha = alpha.to(logit.device) + + idx = target.cpu().long() + + one_hot_key = torch.FloatTensor(target.size(0), num_class).zero_() + one_hot_key = one_hot_key.scatter_(1, idx, 1) + if one_hot_key.device != logit.device: + one_hot_key = one_hot_key.to(logit.device) + + if self.smooth: + one_hot_key = torch.clamp(one_hot_key, + self.smooth / (num_class - 1), + 1.0 - self.smooth) + pt = (one_hot_key * logit).sum(1) + self.smooth + logpt = pt.log() + + gamma = self.gamma + + alpha = alpha[idx] + alpha = torch.squeeze(alpha) + loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt + + if self.size_average: + loss = loss.mean() + return loss diff --git a/OpenOOD/openood/losses/kdad_losses.py b/OpenOOD/openood/losses/kdad_losses.py new file mode 100644 index 0000000000000000000000000000000000000000..407460d58a3b9c13a0553d0538d56e73c74443e6 --- /dev/null +++ b/OpenOOD/openood/losses/kdad_losses.py @@ -0,0 +1,63 @@ +import torch +from torch import nn + + +class MseDirectionLoss(nn.Module): + """Define MSE + Direction loss.""" + def __init__(self, lamda): + super(MseDirectionLoss, self).__init__() + self.lamda = lamda + self.criterion = nn.MSELoss() + self.similarity_loss = torch.nn.CosineSimilarity() + + def forward(self, output_pred, output_real): + y_pred_0, y_pred_1, y_pred_2, y_pred_3 = output_pred[3], output_pred[ + 6], output_pred[9], output_pred[12] + y_0, y_1, y_2, y_3 = output_real[3], output_real[6], output_real[ + 9], output_real[12] + + # different terms of loss + abs_loss_0 = self.criterion(y_pred_0, y_0) + loss_0 = torch.mean(1 - self.similarity_loss( + y_pred_0.view(y_pred_0.shape[0], -1), y_0.view(y_0.shape[0], -1))) + abs_loss_1 = self.criterion(y_pred_1, y_1) + loss_1 = torch.mean(1 - self.similarity_loss( + y_pred_1.view(y_pred_1.shape[0], -1), y_1.view(y_1.shape[0], -1))) + abs_loss_2 = self.criterion(y_pred_2, y_2) + loss_2 = torch.mean(1 - self.similarity_loss( + y_pred_2.view(y_pred_2.shape[0], -1), y_2.view(y_2.shape[0], -1))) + abs_loss_3 = self.criterion(y_pred_3, y_3) + loss_3 = torch.mean(1 - self.similarity_loss( + y_pred_3.view(y_pred_3.shape[0], -1), y_3.view(y_3.shape[0], -1))) + + total_loss = loss_0 + loss_1 + loss_2 + loss_3 + self.lamda * ( + abs_loss_0 + abs_loss_1 + abs_loss_2 + abs_loss_3) + + return total_loss + + +class DirectionOnlyLoss(nn.Module): + """Define Direction loss.""" + def __init__(self): + super(DirectionOnlyLoss, self).__init__() + self.similarity_loss = torch.nn.CosineSimilarity() + + def forward(self, output_pred, output_real): + y_pred_0, y_pred_1, y_pred_2, y_pred_3 = output_pred[3], output_pred[ + 6], output_pred[9], output_pred[12] + y_0, y_1, y_2, y_3 = output_real[3], output_real[6], output_real[ + 9], output_real[12] + + # different terms of loss + loss_0 = torch.mean(1 - self.similarity_loss( + y_pred_0.view(y_pred_0.shape[0], -1), y_0.view(y_0.shape[0], -1))) 
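# each loss_i below is 1 - mean cosine similarity between the flattened
# predicted and reference activations at one hook: 0 when the feature
# directions align, up to 2 when they point in opposite directions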
+ loss_1 = torch.mean(1 - self.similarity_loss( + y_pred_1.view(y_pred_1.shape[0], -1), y_1.view(y_1.shape[0], -1))) + loss_2 = torch.mean(1 - self.similarity_loss( + y_pred_2.view(y_pred_2.shape[0], -1), y_2.view(y_2.shape[0], -1))) + loss_3 = torch.mean(1 - self.similarity_loss( + y_pred_3.view(y_pred_3.shape[0], -1), y_3.view(y_3.shape[0], -1))) + + total_loss = loss_0 + loss_1 + loss_2 + loss_3 + + return total_loss diff --git a/OpenOOD/openood/losses/rd4ad_loss.py b/OpenOOD/openood/losses/rd4ad_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..1a68083e8e9168b5e32c32f413691bdd473c7bd2 --- /dev/null +++ b/OpenOOD/openood/losses/rd4ad_loss.py @@ -0,0 +1,11 @@ +import torch +from torch import nn + +def loss_function(a, b): + + cos_loss = torch.nn.CosineSimilarity() + loss = 0 + for item in range(len(a)): + loss += torch.mean(1-cos_loss(a[item].view(a[item].shape[0],-1), + b[item].view(b[item].shape[0],-1))) + return loss \ No newline at end of file diff --git a/OpenOOD/openood/losses/reweight.py b/OpenOOD/openood/losses/reweight.py new file mode 100644 index 0000000000000000000000000000000000000000..07092d89d90a8992f5795d6a40731e975bfac329 --- /dev/null +++ b/OpenOOD/openood/losses/reweight.py @@ -0,0 +1,14 @@ +import torch +import torch.nn.functional as F + +from .sce import soft_cross_entropy + + +def rew_ce(logits, labels, sample_weights): + losses = F.cross_entropy(logits, labels, reduction='none') + return (losses * sample_weights.type_as(losses)).mean() + + +def rew_sce(logits, soft_labels, sample_weights): + losses = soft_cross_entropy(logits, soft_labels, reduce=False) + return torch.mean(losses * sample_weights.type_as(losses)) diff --git a/OpenOOD/openood/losses/sce.py b/OpenOOD/openood/losses/sce.py new file mode 100644 index 0000000000000000000000000000000000000000..2db7c3e4c3d33937860f918b94447333af2f0187 --- /dev/null +++ b/OpenOOD/openood/losses/sce.py @@ -0,0 +1,88 @@ +import torch + + +class SoftCrossEntropyFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, logit, label, weight=None): + assert logit.size() == label.size(), 'logit.size() != label.size()' + dim = logit.dim() + max_logit = logit.max(dim - 1, keepdim=True)[0] + logit = logit - max_logit + exp_logit = logit.exp() + exp_sum = exp_logit.sum(dim - 1, keepdim=True) + prob = exp_logit / exp_sum + log_exp_sum = exp_sum.log() + neg_log_prob = log_exp_sum - logit + + if weight is None: + weighted_label = label + else: + if weight.size() != (logit.size(-1), ): + raise ValueError( + 'since logit.size() = {}, '\ + 'weight.size() should be ({},), but got {}' + .format( + logit.size(), + logit.size(-1), + weight.size(), + )) + size = [1] * label.dim() + size[-1] = label.size(-1) + weighted_label = label * weight.view(size) + ctx.save_for_backward(weighted_label, prob) + out = (neg_log_prob * weighted_label).sum(dim - 1) + return out + + @staticmethod + def backward(ctx, grad_output): + weighted_label, prob = ctx.saved_tensors + old_size = weighted_label.size() + # num_classes + K = old_size[-1] + # batch_size + B = weighted_label.numel() // K + + grad_output = grad_output.view(B, 1) + weighted_label = weighted_label.view(B, K) + prob = prob.view(B, K) + grad_input = grad_output * (prob * weighted_label.sum(1, True) - + weighted_label) + grad_input = grad_input.view(old_size) + return grad_input, None, None + + +def soft_cross_entropy(logit, + label, + weight=None, + reduce=None, + reduction='mean'): + if weight is not None and weight.requires_grad: + raise 
+    losses = SoftCrossEntropyFunction.apply(logit, label, weight)
+    reduction = {
+        True: 'mean',
+        False: 'none',
+        None: reduction,
+    }[reduce]
+    if reduction == 'mean':
+        return losses.mean()
+    elif reduction == 'sum':
+        return losses.sum()
+    elif reduction == 'none':
+        return losses
+    else:
+        raise ValueError('invalid value for reduction: {}'.format(reduction))
+
+
+class SoftCrossEntropyLoss(torch.nn.Module):
+    def __init__(self, weight=None, reduce=None, reduction='mean'):
+        super(SoftCrossEntropyLoss, self).__init__()
+        self.weight = weight
+        self.reduce = reduce
+        self.reduction = reduction
+
+    def forward(self, logit, label, weight=None):
+        if weight is None:
+            weight = self.weight
+        return soft_cross_entropy(logit, label, weight, self.reduce,
+                                  self.reduction)
diff --git a/OpenOOD/openood/losses/ssim.py b/OpenOOD/openood/losses/ssim.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6559f821baa2fe1fe56d0045987230af6a7ecd5
--- /dev/null
+++ b/OpenOOD/openood/losses/ssim.py
@@ -0,0 +1,112 @@
+from math import exp
+
+import torch
+import torch.nn.functional as F
+
+
+def gaussian(window_size, sigma):
+    gauss = torch.Tensor([
+        exp(-(x - window_size // 2)**2 / float(2 * sigma**2))
+        for x in range(window_size)
+    ])
+    return gauss / gauss.sum()
+
+
+def create_window(window_size, channel=1):
+    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+    _2D_window = _1D_window.mm(
+        _1D_window.t()).float().unsqueeze(0).unsqueeze(0)
+    window = _2D_window.expand(channel, 1, window_size,
+                               window_size).contiguous()
+    return window
+
+
+def ssim(img1,
+         img2,
+         window_size=11,
+         window=None,
+         size_average=True,
+         full=False,
+         val_range=None):
+    if val_range is None:
+        if torch.max(img1) > 128:
+            max_val = 255
+        else:
+            max_val = 1
+
+        if torch.min(img1) < -0.5:
+            min_val = -1
+        else:
+            min_val = 0
+        val_range = max_val - min_val
+
+    padd = window_size // 2
+    (_, channel, height, width) = img1.size()
+    if window is None:
+        real_size = min(window_size, height, width)
+        window = create_window(real_size, channel=channel).to(img1.device)
+
+    mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
+    mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
+
+    mu1_sq = mu1.pow(2)
+    mu2_sq = mu2.pow(2)
+    mu1_mu2 = mu1 * mu2
+
+    sigma1_sq = F.conv2d(img1 * img1, window, padding=padd,
+                         groups=channel) - mu1_sq
+    sigma2_sq = F.conv2d(img2 * img2, window, padding=padd,
+                         groups=channel) - mu2_sq
+    sigma12 = F.conv2d(img1 * img2, window, padding=padd,
+                       groups=channel) - mu1_mu2
+
+    c1 = (0.01 * val_range)**2
+    c2 = (0.03 * val_range)**2
+
+    v1 = 2.0 * sigma12 + c2
+    v2 = sigma1_sq + sigma2_sq + c2
+    cs = torch.mean(v1 / v2)  # contrast sensitivity
+
+    ssim_map = ((2 * mu1_mu2 + c1) * v1) / ((mu1_sq + mu2_sq + c1) * v2)
+
+    if size_average:
+        ret = ssim_map.mean()
+    else:
+        ret = ssim_map.mean(1).mean(1).mean(1)
+
+    if full:
+        return ret, cs
+    return ret, ssim_map
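+
+# Illustrative usage sketch (not called anywhere in this file): for two image
+# batches img1, img2 in [0, 1],
+#     score, _ = ssim(img1, img2)   # mean SSIM score
+#     loss = 1.0 - score            # what the SSIM module below computes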
+
+
+class SSIM(torch.nn.Module):
+    # For DRAEM
+    def __init__(self, window_size=11, size_average=True, val_range=None):
+        super(SSIM, self).__init__()
+        self.window_size = window_size
+        self.size_average = size_average
+        self.val_range = val_range
+
+        # Assume 1 channel for SSIM
+        self.channel = 1
+        self.window = create_window(window_size).cuda()
+
+    def forward(self, img1, img2):
+        (_, channel, _, _) = img1.size()
+
+        if channel == self.channel and self.window.dtype == img1.dtype:
+            window = self.window
+        else:
+            window = create_window(self.window_size,
+                                   channel).to(img1.device).type(img1.dtype)
+            self.window = window
+            self.channel = channel
+
+        s_score, ssim_map = ssim(img1,
+                                 img2,
+                                 window=window,
+                                 window_size=self.window_size,
+                                 size_average=self.size_average)
+        return 1.0 - s_score
diff --git a/OpenOOD/openood/networks/__init__.py b/OpenOOD/openood/networks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..56bcce1eb46d0ab9e71fe2f077d0030a1bf62a41
--- /dev/null
+++ b/OpenOOD/openood/networks/__init__.py
@@ -0,0 +1,17 @@
+from .ash_net import ASHNet
+try:
+    from .clip import CLIPZeroshot
+except ModuleNotFoundError:
+    pass
+from .densenet import DenseNet3
+# from .mmcls_featext import ImageClassifierWithReturnFeature
+from .resnet18_32x32 import ResNet18_32x32
+from .resnet18_224x224 import ResNet18_224x224
+from .resnet50 import ResNet50
+from .utils import get_network
+from .wrn import WideResNet
+from .swin_t import Swin_T
+from .vit_b_16 import ViT_B_16
+from .regnet_y_16gf import RegNet_Y_16GF
+
+from .model_bronze import AKG
\ No newline at end of file
diff --git a/OpenOOD/openood/networks/arpl_net.py b/OpenOOD/openood/networks/arpl_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..80be469a9cb55a8d854a8cd0de86aa58023ef5dc
--- /dev/null
+++ b/OpenOOD/openood/networks/arpl_net.py
@@ -0,0 +1,647 @@
+## reference code https://github.com/pytorch/examples/blob/master/dcgan/main.py
+
+import operator
+from collections import OrderedDict
+from itertools import islice
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.modules.conv import _ConvNd
+from torch.nn.modules.utils import _ntuple
+
+
+def weights_init(m):
+    classname = m.__class__.__name__
+    if classname.find('Conv') != -1:
+        m.weight.data.normal_(0.0, 0.02)
+    elif classname.find('BatchNorm') != -1:
+        m.weight.data.normal_(1.0, 0.02)
+        m.bias.data.fill_(0)
+
+
+class _netD32(nn.Module):
+    def __init__(self, ngpu, nc, ndf):
+        super(_netD32, self).__init__()
+        self.ngpu = ngpu
+        self.main = nn.Sequential(
+            # input size. (nc) x 32 x 32
+            nn.Conv2d(nc, ndf * 2, 4, 2, 1, bias=False),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (ndf*2) x 16 x 16
+            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ndf * 4),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (ndf*4) x 8 x 8
+            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ndf * 8),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (ndf*8) x 4 x 4
+            nn.Conv2d(ndf * 8, ndf * 16, 4, 1, 0, bias=False),
+            nn.Sigmoid())
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.classifier = nn.Sequential(nn.Linear(ndf * 16, 1), nn.Sigmoid())
+
+    def forward(self, input):
+        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
+            output = nn.parallel.data_parallel(self.main, input,
+                                               range(self.ngpu))
+        else:
+            output = self.main(input)
+
+        output = self.avgpool(output)
+        output = torch.flatten(output, 1)
+        output = self.classifier(output).flatten()
+
+        return output
+
+
+class _netG32(nn.Module):
+    def __init__(self, ngpu, nz, ngf, nc):
+        super(_netG32, self).__init__()
+        self.ngpu = ngpu
+        self.main = nn.Sequential(
+            # input is Z, going into a convolution
+            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
+            nn.BatchNorm2d(ngf * 8),
+            nn.ReLU(True),
+            # state size. (ngf*8) x 4 x 4
+            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ngf * 4),
+            nn.ReLU(True),
+            # state size. (ngf*4) x 8 x 8
+            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ngf * 2),
+            nn.ReLU(True),
+            # state size. (ngf*2) x 16 x 16
+            nn.ConvTranspose2d(ngf * 2, nc, 4, 2, 1, bias=False),
+            # nn.Sigmoid()
+            # state size. (nc) x 32 x 32
+        )
+
+    def forward(self, input):
+        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
+            output = nn.parallel.data_parallel(self.main, input,
+                                               range(self.ngpu))
+        else:
+            output = self.main(input)
+
+        return output
+
+
+def Generator32(n_gpu, nz, ngf, nc):
+    model = _netG32(n_gpu, nz, ngf, nc)
+    model.apply(weights_init)
+    return model
+
+
+def Discriminator32(n_gpu, nc, ndf):
+    model = _netD32(n_gpu, nc, ndf)
+    model.apply(weights_init)
+    return model
+
+
+class _netD(nn.Module):
+    def __init__(self, ngpu, nc, ndf):
+        super(_netD, self).__init__()
+        self.ngpu = ngpu
+        self.main = nn.Sequential(
+            # input size. (nc) x 32 x 32
+            nn.Conv2d(nc, ndf * 2, 4, 2, 1, bias=False),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (ndf*2) x 16 x 16
+            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ndf * 4),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (ndf*4) x 8 x 8
+            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ndf * 8),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (ndf*8) x 4 x 4
+            nn.Conv2d(ndf * 8, ndf * 16, 4, 1, 0, bias=False),
+            nn.Sigmoid())
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.classifier = nn.Sequential(nn.Linear(ndf * 16, 1), nn.Sigmoid())
+
+    def forward(self, input):
+        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
+            output = nn.parallel.data_parallel(self.main, input,
+                                               range(self.ngpu))
+        else:
+            output = self.main(input)
+
+        output = self.avgpool(output)
+        output = torch.flatten(output, 1)
+        output = self.classifier(output).flatten()
+
+        return output
+
+
+class _netG(nn.Module):
+    def __init__(self, ngpu, nz, ngf, nc):
+        super(_netG, self).__init__()
+        self.ngpu = ngpu
+        self.main = nn.Sequential(
+            # input is Z, going into a convolution
+            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
+            nn.BatchNorm2d(ngf * 8),
+            nn.ReLU(True),
+            # state size. (ngf*8) x 4 x 4
+            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ngf * 4),
+            nn.ReLU(True),
+            # state size. (ngf*4) x 8 x 8
+            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ngf * 2),
+            nn.ReLU(True),
+            # state size. (ngf*2) x 16 x 16
+            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(ngf),
+            nn.ReLU(True),
+            # state size. (ngf) x 32 x 32
+            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
+            # nn.Sigmoid()
+        )
+
+    def forward(self, input):
+        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
+            output = nn.parallel.data_parallel(self.main, input,
+                                               range(self.ngpu))
+        else:
+            output = self.main(input)
+
+        return output
+
+
+def Generator(n_gpu, nz, ngf, nc):
+    model = _netG(n_gpu, nz, ngf, nc)
+    model.apply(weights_init)
+    return model
+
+
+def Discriminator(n_gpu, nc, ndf):
+    model = _netD(n_gpu, nc, ndf)
+    model.apply(weights_init)
+    return model
+
+
+class _MultiBatchNorm(nn.Module):
+    _version = 2
+
+    def __init__(self,
+                 num_features,
+                 num_classes,
+                 eps=1e-5,
+                 momentum=0.1,
+                 affine=True,
+                 track_running_stats=True):
+        super(_MultiBatchNorm, self).__init__()
+        # self.bns = nn.ModuleList([nn.modules.batchnorm._BatchNorm(
+        #     num_features, eps, momentum, affine, track_running_stats)
+        #     for _ in range(num_classes)])
+        self.bns = nn.ModuleList([
+            nn.BatchNorm2d(num_features, eps, momentum, affine,
+                           track_running_stats) for _ in range(num_classes)
+        ])
+
+    def reset_running_stats(self):
+        for bn in self.bns:
+            bn.reset_running_stats()
+
+    def reset_parameters(self):
+        for bn in self.bns:
+            bn.reset_parameters()
+
+    def _check_input_dim(self, input):
+        raise NotImplementedError
+
+    def forward(self, x, domain_label):
+        self._check_input_dim(x)
+        bn = self.bns[domain_label[0]]
+        return bn(x), domain_label
+
+
+class MultiBatchNorm(_MultiBatchNorm):
+    def _check_input_dim(self, input):
+        if input.dim() != 4:
+            raise ValueError('expected 4D input (got {}D input)'.format(
+                input.dim()))
+
+
+_pair = _ntuple(2)
+
+__all__ = ['resnet18ABN', 'resnet34ABN', 'resnet50ABN']
+
+model_urls = {
+    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
+    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
+    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
+    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
+    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
+}
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+    """3x3 convolution with padding."""
+    return nn.Conv2d(in_planes,
+                     out_planes,
+                     kernel_size=3,
+                     stride=stride,
+                     padding=1,
+                     bias=False)
+
+
+class Conv2d(_ConvNd):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 bias=True):
+        kernel_size = _pair(kernel_size)
+        stride = _pair(stride)
+        padding = _pair(padding)
+        dilation = _pair(dilation)
+        super(Conv2d, self).__init__(in_channels,
+                                     out_channels,
+                                     kernel_size,
+                                     stride,
+                                     padding,
+                                     dilation,
+                                     False,
+                                     _pair(0),
+                                     groups,
+                                     bias,
+                                     padding_mode='zeros')
+
+    def forward(self, input, domain_label):
+        return F.conv2d(input, self.weight, self.bias, self.stride,
+                        self.padding, self.dilation, self.groups), domain_label
+
+
+class TwoInputSequential(nn.Module):
+    r"""A sequential container that forwards a pair of inputs through each
+    module in order."""
+    def __init__(self, *args):
+        super(TwoInputSequential, self).__init__()
+        if len(args) == 1 and isinstance(args[0], OrderedDict):
+            for key, module in args[0].items():
+                self.add_module(key, module)
+        else:
+            for idx, module in enumerate(args):
+                self.add_module(str(idx), module)
+
+    def _get_item_by_idx(self, iterator, idx):
+        """Get the idx-th item of the iterator."""
+        size = len(self)
+        idx = operator.index(idx)
+        if not -size <= idx < size:
+            raise IndexError('index {} is out of range'.format(idx))
+        idx %= size
+        return next(islice(iterator, idx, None))
+
+    def __getitem__(self, idx):
+        if isinstance(idx, slice):
+            return TwoInputSequential(
+                OrderedDict(list(self._modules.items())[idx]))
+        else:
+            return self._get_item_by_idx(self._modules.values(), idx)
+
+    def __setitem__(self, idx, module):
+        key = self._get_item_by_idx(self._modules.keys(), idx)
+        return setattr(self, key, module)
+
+    def __delitem__(self, idx):
+        if isinstance(idx, slice):
+            for key in list(self._modules.keys())[idx]:
+                delattr(self, key)
+        else:
+            key = self._get_item_by_idx(self._modules.keys(), idx)
+            delattr(self, key)
+
+    def __len__(self):
+        return len(self._modules)
+
+    def __dir__(self):
+        keys = super(TwoInputSequential, self).__dir__()
+        keys = [key for key in keys if not key.isdigit()]
+        return keys
+
+    def forward(self, input1, input2):
+        for module in self._modules.values():
+            input1, input2 = module(input1, input2)
+        return input1, input2
+
+
+def resnet18ABN(num_classes=10, num_bns=2):
+    model = ResNetABN(BasicBlock, [2, 2, 2, 2],
+                      num_classes=num_classes,
+                      num_bns=num_bns)
+
+    return model
+
+
+def resnet34ABN(num_classes=10, num_bns=2):
+    model = ResNetABN(BasicBlock, [3, 4, 6, 3],
+                      num_classes=num_classes,
+                      num_bns=num_bns)
+
+    return model
+
+
+def resnet50ABN(num_classes=10, num_bns=2):
+    model = ResNetABN(Bottleneck, [3, 4, 6, 3],
+                      num_classes=num_classes,
+                      num_bns=num_bns)
+
+    return model
+
+
+def _update_initial_weights_ABN(state_dict,
+                                num_classes=1000,
+                                num_bns=2,
+                                ABN_type='all'):
+    new_state_dict = state_dict.copy()
+
+    for key, val in state_dict.items():
+        update_dict = False
+        if ((('bn' in key or 'downsample.1' in key) and ABN_type == 'all')
+                or (('bn1' in key) and ABN_type == 'partial-bn1')):
+            update_dict = True
+
+        if update_dict:
+            if 'weight' in key:
+                for d in range(num_bns):
+                    new_state_dict[
+                        key[0:-6] +
+                        'bns.{}.weight'.format(d)] = val.data.clone()
+
+            elif 'bias' in key:
+                for d in range(num_bns):
+                    new_state_dict[key[0:-4] +
+                                   'bns.{}.bias'.format(d)] = val.data.clone()
+
+            if 'running_mean' in key:
+                for d in range(num_bns):
+                    new_state_dict[
+                        key[0:-12] +
+                        'bns.{}.running_mean'.format(d)] = val.data.clone()
+
+            if 'running_var' in key:
+                for d in range(num_bns):
+                    new_state_dict[
+                        key[0:-11] +
+                        'bns.{}.running_var'.format(d)] = val.data.clone()
+
+            if 'num_batches_tracked' in key:
+                for d in range(num_bns):
+                    new_state_dict[key[0:-len('num_batches_tracked')] +
+                                   'bns.{}.num_batches_tracked'.format(
+                                       d)] = val.data.clone()
+
+    if num_classes != 1000 or len(
+            [key for key in new_state_dict.keys() if 'fc' in key]) > 1:
+        key_list = list(new_state_dict.keys())
+        for key in key_list:
+            if 'fc' in key:
+                print('pretrained {} are not used as initial params.'.format(
+                    key))
+                del new_state_dict[key]
+
+    return new_state_dict
+
+
+class ResNetABN(nn.Module):
+    def __init__(self, block, layers, num_classes=10, num_bns=2):
+        self.inplanes = 64
+        self.num_bns = num_bns
+        self.num_classes = num_classes
+        super(ResNetABN, self).__init__()
+        self.conv1 = conv3x3(3, 64)
+        self.bn1 = MultiBatchNorm(64, self.num_bns)
+        self.layer1 = self._make_layer(block,
+                                       64,
+                                       layers[0],
+                                       stride=1,
+                                       num_bns=self.num_bns)
+        self.layer2 = self._make_layer(block,
+                                       128,
+                                       layers[1],
+                                       stride=2,
+                                       num_bns=self.num_bns)
+        self.layer3 = self._make_layer(block,
+                                       256,
+                                       layers[2],
+                                       stride=2,
+                                       num_bns=self.num_bns)
+        self.layer4 = self._make_layer(block,
+                                       512,
+                                       layers[3],
+                                       stride=2,
+                                       num_bns=self.num_bns)
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(512 * block.expansion, num_classes)
+
+    def _make_layer(self, block, planes, blocks, stride=1, num_bns=2):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = TwoInputSequential(
+                Conv2d(self.inplanes,
+                       planes * block.expansion,
+                       kernel_size=1,
+                       stride=stride,
+                       bias=False),
+                MultiBatchNorm(planes * block.expansion, num_bns),
+            )
+
+        layers = []
+        layers.append(
+            block(self.inplanes, planes, stride, downsample, num_bns=num_bns))
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes, num_bns=num_bns))
+
+        return TwoInputSequential(*layers)
+
+    def forward(self, x, return_feature=False, domain_label=None):
+        if domain_label is None:
+            domain_label = 0 * torch.ones(x.shape[0], dtype=torch.long).cuda()
+        x = self.conv1(x)
+        x, _ = self.bn1(x, domain_label)
+        x = F.relu(x)
+        x, _ = self.layer1(x, domain_label)
+        x, _ = self.layer2(x, domain_label)
+        x, _ = self.layer3(x, domain_label)
+        x, _ = self.layer4(x, domain_label)
+
+        x = self.avgpool(x)
+        feat = x.view(x.size(0), -1)
+        x = self.fc(feat)
+
+        if return_feature:
+            return x, feat
+        else:
+            return x
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, num_bns=2):
+        super(BasicBlock, self).__init__()
+        self.conv1 = conv3x3(inplanes, planes, stride)
+        self.bn1 = MultiBatchNorm(planes, num_bns)
+        self.conv2 = conv3x3(planes, planes)
+        self.bn2 = MultiBatchNorm(planes, num_bns)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x, domain_label):
+        residual = x
+
+        out = self.conv1(x)
+        out, _ = self.bn1(out, domain_label)
+        out = F.relu(out)
+
+        out = self.conv2(out)
+        out, _ = self.bn2(out, domain_label)
+
+        if self.downsample is not None:
+            residual, _ = self.downsample(x, domain_label)
+
+        out += residual
+        out = F.relu(out)
+
+        return out, domain_label
+
+
+class Bottleneck(nn.Module):
+    expansion = 4
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, num_bns=2):
+        super(Bottleneck, self).__init__()
+        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
+        self.bn1 = MultiBatchNorm(planes, num_bns)
+        self.conv2 = nn.Conv2d(planes,
+                               planes,
+                               kernel_size=3,
+                               stride=stride,
+                               padding=1,
+                               bias=False)
+        self.bn2 = MultiBatchNorm(planes, num_bns)
+        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
+        self.bn3 = MultiBatchNorm(planes * 4, num_bns)
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x, domain_label):
+        residual = x
+
+        out = self.conv1(x)
+        out, _ = self.bn1(out, domain_label)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out, _ = self.bn2(out, domain_label)
+        out = self.relu(out)
+
+        out = self.conv3(out)
+        out, _ = self.bn3(out, domain_label)
+
+        if self.downsample is not None:
+            residual, _ = self.downsample(x, domain_label)
+
+        out += residual
+        out = self.relu(out)
+
+        return out, domain_label
+
+
+class Dist(nn.Module):
+    def __init__(self,
+                 num_classes=10,
+                 num_centers=1,
+                 feat_dim=2,
+                 init='random'):
+        super(Dist, self).__init__()
+        self.feat_dim = feat_dim
+        self.num_classes = num_classes
+        self.num_centers = num_centers
+
+        if init == 'random':
+            self.centers = nn.Parameter(
+                0.1 * torch.randn(num_classes * num_centers, self.feat_dim))
+        else:
+            self.centers = nn.Parameter(
+                torch.Tensor(num_classes * num_centers, self.feat_dim))
+            self.centers.data.fill_(0)
+
+    def forward(self, features, center=None, metric='l2'):
+        if metric == 'l2':
+            f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True)
+            if center is None:
+                c_2 = torch.sum(torch.pow(self.centers, 2),
+                                dim=1,
+                                keepdim=True)
+                dist = f_2 - 2 * torch.matmul(
+                    features, torch.transpose(self.centers, 1,
+                                              0)) + torch.transpose(c_2, 1, 0)
+            else:
+                c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True)
+                dist = f_2 - 2 * torch.matmul(
+                    features, torch.transpose(center, 1, 0)) + torch.transpose(
+                        c_2, 1, 0)
+            dist = dist / float(features.shape[1])
+        else:
+            if center is None:
+                center = self.centers
+            dist = features.matmul(center.t())
+        dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers])
+        dist = torch.mean(dist, dim=2)
+
+        return dist
+
+
+class ARPLayer(nn.Module):
+    def __init__(self, feat_dim=2, num_classes=10, weight_pl=0.1, temp=1.0):
+        super(ARPLayer, self).__init__()
+        self.weight_pl = weight_pl
+        self.temp = temp
+        self.Dist = Dist(num_classes, feat_dim=feat_dim)
+        self.points = self.Dist.centers
+        self.radius = nn.Parameter(torch.Tensor(1))
+        self.radius.data.fill_(0)
+        self.margin_loss = nn.MarginRankingLoss(margin=1.0)
+
+    def forward(self, x, labels=None):
+        dist_dot_p = self.Dist(x, center=self.points, metric='dot')
+        dist_l2_p = self.Dist(x, center=self.points)
+        logits = dist_l2_p - dist_dot_p
+
+        if labels is None:
+            return logits
+        loss = F.cross_entropy(logits / self.temp, labels)
+
+        center_batch = self.points[labels, :]
+        _dis_known = (x - center_batch).pow(2).mean(1)
+        target = torch.ones(_dis_known.size()).cuda()
+        loss_r = self.margin_loss(self.radius, _dis_known, target)
+
+        loss = loss + self.weight_pl * loss_r
+
+        return logits, loss
+
+    def fake_loss(self, x):
+        logits = self.Dist(x, center=self.points)
+        prob = F.softmax(logits, dim=1)
+        loss = (prob * torch.log(prob)).sum(1).mean().exp()
+
+        return loss
diff --git a/OpenOOD/openood/networks/ash_net.py b/OpenOOD/openood/networks/ash_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..25e2f800a231a0d39a3a60b6c520bb1ee9fa728c
--- /dev/null
+++ b/OpenOOD/openood/networks/ash_net.py
@@ -0,0 +1,96 @@
+import numpy as np
+import torch
+import torch.nn as nn
+
+
+class ASHNet(nn.Module):
+    def __init__(self, backbone):
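+        # `backbone` is assumed to expose its penultimate feature via
+        # forward(x, return_feature=True) and its classification head via
+        # get_fc_layer(); forward_threshold below prunes that feature with
+        # ash_b and then re-applies only the head.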
super(ASHNet, self).__init__() + self.backbone = backbone + + def forward(self, x, return_feature=False, return_feature_list=False): + try: + return self.backbone(x, return_feature, return_feature_list) + except TypeError: + return self.backbone(x, return_feature) + + def forward_threshold(self, x, percentile): + _, feature = self.backbone(x, return_feature=True) + feature = ash_b(feature.view(feature.size(0), -1, 1, 1), percentile) + feature = feature.view(feature.size(0), -1) + logits_cls = self.backbone.get_fc_layer()(feature) + return logits_cls + + def get_fc(self): + fc = self.backbone.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + + +def ash_b(x, percentile=65): + assert x.dim() == 4 + assert 0 <= percentile <= 100 + b, c, h, w = x.shape + + # calculate the sum of the input per sample + s1 = x.sum(dim=[1, 2, 3]) + + n = x.shape[1:].numel() + k = n - int(np.round(n * percentile / 100.0)) + t = x.view((b, c * h * w)) + v, i = torch.topk(t, k, dim=1) + fill = s1 / k + fill = fill.unsqueeze(dim=1).expand(v.shape) + t.zero_().scatter_(dim=1, index=i, src=fill) + return x + + +def ash_p(x, percentile=65): + assert x.dim() == 4 + assert 0 <= percentile <= 100 + + b, c, h, w = x.shape + + n = x.shape[1:].numel() + k = n - int(np.round(n * percentile / 100.0)) + t = x.view((b, c * h * w)) + v, i = torch.topk(t, k, dim=1) + t.zero_().scatter_(dim=1, index=i, src=v) + + return x + + +def ash_s(x, percentile=65): + assert x.dim() == 4 + assert 0 <= percentile <= 100 + b, c, h, w = x.shape + + # calculate the sum of the input per sample + s1 = x.sum(dim=[1, 2, 3]) + n = x.shape[1:].numel() + k = n - int(np.round(n * percentile / 100.0)) + t = x.view((b, c * h * w)) + v, i = torch.topk(t, k, dim=1) + t.zero_().scatter_(dim=1, index=i, src=v) + + # calculate new sum of the input per sample after pruning + s2 = x.sum(dim=[1, 2, 3]) + + # apply sharpening + scale = s1 / s2 + x = x * torch.exp(scale[:, None, None, None]) + + return x + + +def ash_rand(x, percentile=65, r1=0, r2=10): + assert x.dim() == 4 + assert 0 <= percentile <= 100 + b, c, h, w = x.shape + + n = x.shape[1:].numel() + k = n - int(np.round(n * percentile / 100.0)) + t = x.view((b, c * h * w)) + v, i = torch.topk(t, k, dim=1) + v = v.uniform_(r1, r2) + t.zero_().scatter_(dim=1, index=i, src=v) + return x diff --git a/OpenOOD/openood/networks/bit.py b/OpenOOD/openood/networks/bit.py new file mode 100644 index 0000000000000000000000000000000000000000..ddc904fc6bf7cd76ca9f8fe9952cf5992a02bb33 --- /dev/null +++ b/OpenOOD/openood/networks/bit.py @@ -0,0 +1,385 @@ +"""Bottleneck ResNet v2 with GroupNorm and Weight Standardization.""" + +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Reshape(nn.Module): + def __init__(self, *args): + super(Reshape, self).__init__() + self.shape = args + + def forward(self, x): + return x.view(self.shape) + + +class StdConv2d(nn.Conv2d): + def forward(self, x): + w = self.weight + v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) + w = (w - m) / torch.sqrt(v + 1e-10) + return F.conv2d(x, w, self.bias, self.stride, self.padding, + self.dilation, self.groups) + + +def conv3x3(cin, cout, stride=1, groups=1, bias=False): + return StdConv2d(cin, + cout, + kernel_size=3, + stride=stride, + padding=1, + bias=bias, + groups=groups) + + +def conv1x1(cin, cout, stride=1, bias=False): + return StdConv2d(cin, + cout, + kernel_size=1, + stride=stride, + padding=0, + bias=bias) + + +def 
tf2th(conv_weights): + """Possibly convert HWIO to OIHW.""" + if conv_weights.ndim == 4: + conv_weights = conv_weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(conv_weights) + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + + Follows the implementation of + "Identity Mappings in Deep Residual Networks": + https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua + + Except it puts the stride on 3x3 conv when available. + """ + def __init__(self, cin, cout=None, cmid=None, stride=1): + super().__init__() + cout = cout or cin + cmid = cmid or cout // 4 + + self.gn1 = nn.GroupNorm(32, cin) + self.conv1 = conv1x1(cin, cmid) + self.gn2 = nn.GroupNorm(32, cmid) + self.conv2 = conv3x3(cmid, cmid, + stride) # Original code has it on conv1!! + self.gn3 = nn.GroupNorm(32, cmid) + self.conv3 = conv1x1(cmid, cout) + self.relu = nn.ReLU(inplace=True) + + if (stride != 1 or cin != cout): + # Projection also with pre-activation according to paper. + self.downsample = conv1x1(cin, cout, stride) + + def forward(self, x): + out = self.relu(self.gn1(x)) + + # Residual branch + residual = x + if hasattr(self, 'downsample'): + residual = self.downsample(out) + + # Unit's branch + out = self.conv1(out) + out = self.conv2(self.relu(self.gn2(out))) + out = self.conv3(self.relu(self.gn3(out))) + + return out + residual + + def load_from(self, weights, prefix=''): + convname = 'standardized_conv2d' + with torch.no_grad(): + self.conv1.weight.copy_( + tf2th(weights[f'{prefix}a/{convname}/kernel'])) + self.conv2.weight.copy_( + tf2th(weights[f'{prefix}b/{convname}/kernel'])) + self.conv3.weight.copy_( + tf2th(weights[f'{prefix}c/{convname}/kernel'])) + self.gn1.weight.copy_(tf2th( + weights[f'{prefix}a/group_norm/gamma'])) + self.gn2.weight.copy_(tf2th( + weights[f'{prefix}b/group_norm/gamma'])) + self.gn3.weight.copy_(tf2th( + weights[f'{prefix}c/group_norm/gamma'])) + self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta'])) + self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta'])) + self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta'])) + if hasattr(self, 'downsample'): + w = weights[f'{prefix}a/proj/{convname}/kernel'] + self.downsample.weight.copy_(tf2th(w)) + + +class ResNetV2(nn.Module): + """Implementation of Pre-activation (v2) ResNet mode.""" + def __init__(self, + block_units, + width_factor, + head_size=1000, + zero_head=False, + num_block_open=-1): + super().__init__() + self.zero_head = zero_head + wf = width_factor # shortcut 'cause we'll use it a lot. + + if num_block_open == -1: + self.fix_parts = [] + self.fix_gn1 = None + elif num_block_open == 0: + self.fix_parts = [ + 'root', 'block1', 'block2', 'block3', 'block4', 'before_head' + ] + self.fix_gn1 = None + elif num_block_open == 1: + self.fix_parts = ['root', 'block1', 'block2', 'block3'] + self.fix_gn1 = 'block4' + elif num_block_open == 2: + self.fix_parts = ['root', 'block1', 'block2'] + self.fix_gn1 = 'block3' + elif num_block_open == 3: + self.fix_parts = ['root', 'block1'] + self.fix_gn1 = 'block2' + elif num_block_open == 4: + self.fix_parts = ['root'] + self.fix_gn1 = 'block1' + else: + raise ValueError( + 'Unexpected block number {}'.format(num_block_open)) + + self.root = nn.Sequential( + OrderedDict([ + ('conv', + StdConv2d(3, + 64 * wf, + kernel_size=7, + stride=2, + padding=3, + bias=False)), + ('pad', nn.ConstantPad2d(1, 0)), + ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)), + # The following is subtly not the same! 
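+                # (nn.MaxPool2d pads implicitly with -inf, while the
+                # ConstantPad2d above pads with zeros, so border maxima
+                # can differ between the two variants)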
+ # ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), + ])) + + self.body = nn.Sequential( + OrderedDict([ + ('block1', + nn.Sequential( + OrderedDict( + [('unit01', + PreActBottleneck( + cin=64 * wf, cout=256 * wf, cmid=64 * wf))] + + [(f'unit{i:02d}', + PreActBottleneck( + cin=256 * wf, cout=256 * wf, cmid=64 * wf)) + for i in range(2, block_units[0] + 1)], ))), + ('block2', + nn.Sequential( + OrderedDict( + [('unit01', + PreActBottleneck(cin=256 * wf, + cout=512 * wf, + cmid=128 * wf, + stride=2))] + + [(f'unit{i:02d}', + PreActBottleneck( + cin=512 * wf, cout=512 * wf, cmid=128 * wf)) + for i in range(2, block_units[1] + 1)], ))), + ('block3', + nn.Sequential( + OrderedDict( + [('unit01', + PreActBottleneck(cin=512 * wf, + cout=1024 * wf, + cmid=256 * wf, + stride=2))] + + [(f'unit{i:02d}', + PreActBottleneck( + cin=1024 * wf, cout=1024 * wf, cmid=256 * wf)) + for i in range(2, block_units[2] + 1)], ))), + ('block4', + nn.Sequential( + OrderedDict( + [('unit01', + PreActBottleneck(cin=1024 * wf, + cout=2048 * wf, + cmid=512 * wf, + stride=2))] + + [(f'unit{i:02d}', + PreActBottleneck( + cin=2048 * wf, cout=2048 * wf, cmid=512 * wf)) + for i in range(2, block_units[3] + 1)], ))), + ])) + + self.before_head = nn.Sequential( + OrderedDict([ + ('gn', nn.GroupNorm(32, 2048 * wf)), + ('relu', nn.ReLU(inplace=True)), + ('avg', nn.AdaptiveAvgPool2d(output_size=1)), + ])) + + self.head = nn.Sequential( + OrderedDict([ + ('conv', + nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True)), + ])) + + if 'root' in self.fix_parts: + for param in self.root.parameters(): + param.requires_grad = False + for bname, block in self.body.named_children(): + if bname in self.fix_parts: + for param in block.parameters(): + param.requires_grad = False + elif bname == self.fix_gn1: + for param in block.unit01.gn1.parameters(): + param.requires_grad = False + + def intermediate_forward(self, x, layer_index=None): + if layer_index == 'all': + out_list = [] + out = self.root(x) + out_list.append(out) + out = self.body.block1(out) + out_list.append(out) + out = self.body.block2(out) + out_list.append(out) + out = self.body.block3(out) + out_list.append(out) + out = self.body.block4(out) + out_list.append(out) + out = self.head(self.before_head(out)) + return out[..., 0, 0], out_list + + out = self.root(x) + if layer_index == 1: + out = self.body.block1(out) + elif layer_index == 2: + out = self.body.block1(out) + out = self.body.block2(out) + elif layer_index == 3: + out = self.body.block1(out) + out = self.body.block2(out) + out = self.body.block3(out) + elif layer_index == 4: + out = self.body.block1(out) + out = self.body.block2(out) + out = self.body.block3(out) + out = self.body.block4(out) + elif layer_index == 5: + out = self.body.block1(out) + out = self.body.block2(out) + out = self.body.block3(out) + out = self.body.block4(out) + out = self.before_head(out) + return out + + def get_fc(self): + w = self.head.conv.weight.cpu().detach().squeeze().numpy() + b = self.head.conv.bias.cpu().detach().squeeze().numpy() + return w, b + + def forward(self, x, layer_index=None, return_feature=False): + if return_feature: + return x, self.intermediate_forward(x, 5)[..., 0, 0] + if layer_index is not None: + return self.intermediate_forward(x, layer_index) + + if 'root' in self.fix_parts: + with torch.no_grad(): + x = self.root(x) + else: + x = self.root(x) + + for bname, block in self.body.named_children(): + if bname in self.fix_parts: + with torch.no_grad(): + x = block(x) + else: + x = block(x) + if 
'before_head' in self.fix_parts: + with torch.no_grad(): + x = self.before_head(x) + else: + x = self.before_head(x) + + x = self.head(x) + assert x.shape[-2:] == (1, 1) # We should have no spatial shape left. + return x[..., 0, 0] + + def load_state_dict_custom(self, state_dict): + state_dict_new = {} + for k, v in state_dict.items(): + state_dict_new[k[len('module.'):]] = v + self.load_state_dict(state_dict_new, strict=True) + + def load_from(self, weights, prefix='resnet/'): + with torch.no_grad(): + self.root.conv.weight.copy_( + tf2th( + weights[f'{prefix}root_block/standardized_conv2d/kernel'])) + # pylint: disable=line-too-long + self.before_head.gn.weight.copy_( + tf2th(weights[f'{prefix}group_norm/gamma'])) + self.before_head.gn.bias.copy_( + tf2th(weights[f'{prefix}group_norm/beta'])) + + if self.zero_head: + nn.init.zeros_(self.head.conv.weight) + nn.init.zeros_(self.head.conv.bias) + else: + self.head.conv.weight.copy_( + tf2th(weights[f'{prefix}head/conv2d/kernel'])) + # pylint: disable=line-too-long + self.head.conv.bias.copy_( + tf2th(weights[f'{prefix}head/conv2d/bias'])) + + for bname, block in self.body.named_children(): + for uname, unit in block.named_children(): + unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/') + + def train(self, mode=True): + self.training = mode + for module in self.children(): + module.train(mode) + + self.head.train(mode) + if 'root' in self.fix_parts: + self.root.eval() + else: + self.root.train(mode) + for bname, block in self.body.named_children(): + if bname in self.fix_parts: + block.eval() + elif bname == self.fix_gn1: + block.train(mode) + block.unit01.gn1.eval() + else: + block.train(mode) + if 'before_head' in self.fix_parts: + self.before_head.eval() + else: + self.before_head.train(mode) + return self + + +KNOWN_MODELS = OrderedDict([ + ('BiT-M-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)), + ('BiT-M-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)), + ('BiT-M-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)), + ('BiT-M-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)), + ('BiT-M-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)), + ('BiT-M-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)), + ('BiT-S-R50x1', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 1, *a, **kw)), + ('BiT-S-R50x3', lambda *a, **kw: ResNetV2([3, 4, 6, 3], 3, *a, **kw)), + ('BiT-S-R101x1', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 1, *a, **kw)), + ('BiT-S-R101x3', lambda *a, **kw: ResNetV2([3, 4, 23, 3], 3, *a, **kw)), + ('BiT-S-R152x2', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 2, *a, **kw)), + ('BiT-S-R152x4', lambda *a, **kw: ResNetV2([3, 8, 36, 3], 4, *a, **kw)), +]) diff --git a/OpenOOD/openood/networks/cider_net.py b/OpenOOD/openood/networks/cider_net.py new file mode 100644 index 0000000000000000000000000000000000000000..8d31f923f20569c1d88e90d7a821715f982e4d1c --- /dev/null +++ b/OpenOOD/openood/networks/cider_net.py @@ -0,0 +1,35 @@ +import torch.nn as nn +import torch.nn.functional as F + + +class CIDERNet(nn.Module): + def __init__(self, backbone, head, feat_dim, num_classes): + super(CIDERNet, self).__init__() + + self.backbone = backbone + if hasattr(self.backbone, 'fc'): + # remove fc otherwise ddp will + # report unused params + self.backbone.fc = nn.Identity() + + try: + feature_size = backbone.feature_size + except AttributeError: + feature_size = backbone.module.feature_size + + if head == 'linear': + self.head = nn.Linear(feature_size, feat_dim) + elif head == 
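# Illustrative sketch (an assumption, not part of this diff): constructing a
# BiT backbone from KNOWN_MODELS above and loading the official .npz weights.
# The checkpoint filename is hypothetical.
import numpy as np
model = KNOWN_MODELS['BiT-M-R50x1'](head_size=10,
                                    zero_head=True,
                                    num_block_open=1)
model.load_from(np.load('BiT-M-R50x1.npz'))  # hypothetical local checkpoint
# With num_block_open=1, root and block1-3 are frozen (plus block4.unit01.gn1);
# the rest of block4 and the head remain trainable:
trainable = [n for n, p in model.named_parameters() if p.requires_grad]
# Note: forward(..., return_feature=True) above returns the raw input as its
# first element; callers in this diff unpack it as `_, feature = ...`.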
'mlp': + self.head = nn.Sequential(nn.Linear(feature_size, feature_size), + nn.ReLU(inplace=True), + nn.Linear(feature_size, feat_dim)) + + def forward(self, x): + feat = self.backbone(x).squeeze() + unnorm_features = self.head(feat) + features = F.normalize(unnorm_features, dim=1) + return features + + def intermediate_forward(self, x): + feat = self.backbone(x).squeeze() + return F.normalize(feat, dim=1) diff --git a/OpenOOD/openood/networks/clip.py b/OpenOOD/openood/networks/clip.py new file mode 100644 index 0000000000000000000000000000000000000000..915150dc7b8294d548d81b087d291df7c4e56c31 --- /dev/null +++ b/OpenOOD/openood/networks/clip.py @@ -0,0 +1,40 @@ +import numpy as np +from tqdm import tqdm + +import torch +import torch.nn as nn +import torch.nn.functional as F +import clip + + +# https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb +def zeroshot_classifier(model, classnames, templates): + with torch.no_grad(): + zeroshot_weights = [] + for classname in tqdm(classnames): + texts = [template.format(classname) + for template in templates] # format with class + texts = clip.tokenize(texts).cuda() # tokenize + class_embeddings = model.encode_text( + texts) # embed with text encoder + class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True) + class_embedding = class_embeddings.mean(dim=0) + class_embedding /= class_embedding.norm() + zeroshot_weights.append(class_embedding) + zeroshot_weights = torch.stack(zeroshot_weights, dim=1).cuda() + return zeroshot_weights + + +class CLIPZeroshot(nn.Module): + def __init__(self, classnames, templates, backbone='ViT-B/16'): + super().__init__() + assert backbone in clip.available_models() + self.model, self.preprocess = clip.load(backbone, device='cuda') + self.zeroshot_weights = zeroshot_classifier(self.model, classnames, + templates) + + def forward(self, x): + image_features = self.model.encode_image(x) + image_features /= image_features.norm(dim=-1, keepdim=True) + logits = image_features @ self.zeroshot_weights + return logits diff --git a/OpenOOD/openood/networks/conf_branch_net.py b/OpenOOD/openood/networks/conf_branch_net.py new file mode 100644 index 0000000000000000000000000000000000000000..cf019869a1ce6bd41841e79e13353f949650e79f --- /dev/null +++ b/OpenOOD/openood/networks/conf_branch_net.py @@ -0,0 +1,33 @@ +import torch.nn as nn + + +class ConfBranchNet(nn.Module): + def __init__(self, backbone, num_classes): + super(ConfBranchNet, self).__init__() + + self.backbone = backbone + if hasattr(self.backbone, 'fc'): + # remove fc otherwise ddp will + # report unused params + self.backbone.fc = nn.Identity() + + try: + feature_size = backbone.feature_size + except AttributeError: + feature_size = backbone.module.feature_size + + self.fc = nn.Linear(feature_size, num_classes) + self.confidence = nn.Linear(feature_size, 1) + + # test conf + def forward(self, x, return_confidence=False): + + _, feature = self.backbone(x, return_feature=True) + + pred = self.fc(feature) + confidence = self.confidence(feature) + + if return_confidence: + return pred, confidence + else: + return pred diff --git a/OpenOOD/openood/networks/csi_net.py b/OpenOOD/openood/networks/csi_net.py new file mode 100644 index 0000000000000000000000000000000000000000..da33062b2a487d81734029874ec029777651255f --- /dev/null +++ b/OpenOOD/openood/networks/csi_net.py @@ -0,0 +1,97 @@ +import torch.nn as nn + + +def get_csi_linear_layers(feature_size, + num_classes, + simclr_dim, + shift_trans_type='rotation'): + simclr_layer = 
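# Illustrative sketch: zero-shot inference with CLIPZeroshot above. The class
# names and prompt template are placeholders; the prompt-engineering notebook
# linked above has the full ImageNet lists. Needs a CUDA device, since
# clip.load and zeroshot_classifier are hard-coded to 'cuda'.
import torch
net = CLIPZeroshot(classnames=['airplane', 'dog', 'cat'],
                   templates=['a photo of a {}.'])
images = torch.randn(2, 3, 224, 224).cuda()
logits = net(images)  # cosine similarities against the class text embeddings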
nn.Sequential( + nn.Linear(feature_size, feature_size), + nn.ReLU(), + nn.Linear(feature_size, simclr_dim), + ) + shift_cls_layer = nn.Linear(feature_size, + get_shift_module(shift_trans_type)) + joint_distribution_layer = nn.Linear(feature_size, 4 * num_classes) + linear = nn.Linear(feature_size, num_classes) + + return { + 'simclr_layer': simclr_layer, + 'shift_cls_layer': shift_cls_layer, + 'joint_distribution_layer': joint_distribution_layer, + 'linear': linear, + } + + +class CSINet(nn.Module): + def __init__(self, + backbone, + feature_size, + num_classes=10, + simclr_dim=128, + shift_trans_type='rotation'): + super(CSINet, self).__init__() + self.backbone = backbone + if hasattr(self.backbone, 'fc'): + # remove fc otherwise ddp will + # report unused params + self.backbone.fc = nn.Identity() + + self.linear = nn.Linear(feature_size, num_classes) + self.simclr_layer = nn.Sequential( + nn.Linear(feature_size, feature_size), + nn.ReLU(), + nn.Linear(feature_size, simclr_dim), + ) + self.feature_size = feature_size + self.joint_distribution_layer = nn.Linear(feature_size, + 4 * num_classes) + + self.K_shift = get_shift_module(shift_trans_type) + self.shift_cls_layer = nn.Linear(feature_size, self.K_shift) + + def forward(self, + inputs, + penultimate=False, + simclr=False, + shift=False, + joint=False): + _aux = {} + _return_aux = False + + _, features = self.backbone(inputs, return_feature=True) + + output = self.linear(features) + + if penultimate: + _return_aux = True + _aux['penultimate'] = features + + if simclr: + _return_aux = True + _aux['simclr'] = self.simclr_layer(features) + + if shift: + _return_aux = True + _aux['shift'] = self.shift_cls_layer(features) + + if joint: + _return_aux = True + _aux['joint'] = self.joint_distribution_layer(features) + + if _return_aux: + return output, _aux + + return output + + +def get_shift_module(shift_trans_type): + + if shift_trans_type == 'rotation': + K_shift = 4 + elif shift_trans_type == 'cutperm': + K_shift = 4 + else: + K_shift = 1 + + return K_shift diff --git a/OpenOOD/openood/networks/de_resnet18_256x256.py b/OpenOOD/openood/networks/de_resnet18_256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..6b3d3a22c233669e255672a829fb6369c0a88cb0 --- /dev/null +++ b/OpenOOD/openood/networks/de_resnet18_256x256.py @@ -0,0 +1,234 @@ +import torch +import torch.nn as nn +from torch import Tensor + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1, upsample=None): + super(BasicBlock, self).__init__() + self.stride = stride + if self.stride == 2: + self.conv1 = nn.ConvTranspose2d(in_planes, + planes, + kernel_size=2, + stride=stride, + bias=False) + else: + self.conv1 = nn.Conv2d(in_planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.upsample = upsample + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + identity = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + if self.upsample is not None: + identity = self.upsample(x) + out += identity + out = self.relu(out) + + return out + + +class De_ResNet18_256x256(nn.Module): + def __init__(self, block=BasicBlock, num_blocks=None, num_classes=10): + super(De_ResNet18_256x256, self).__init__() + self._norm_layer = nn.BatchNorm2d + if num_blocks 
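# Illustrative sketch: querying CSINet's auxiliary heads. Assumes `backbone`
# is a net from this repo (e.g. ResNet18_32x32) whose forward supports
# return_feature=True and which exposes .feature_size.
import torch
net = CSINet(backbone, feature_size=backbone.feature_size,
             num_classes=10, simclr_dim=128, shift_trans_type='rotation')
x = torch.randn(4, 3, 32, 32)
logits, aux = net(x, simclr=True, shift=True, joint=True)
# aux['simclr']: (4, 128) contrastive projection
# aux['shift']:  (4, 4) rotation-prediction logits (K_shift = 4)
# aux['joint']:  (4, 40) joint class-x-shift logits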
is None: + num_blocks = [2, 2, 2, 2] + self.inplanes = 512 * block.expansion + self.layer1 = self._make_layer(block, 256, num_blocks[0], stride=2) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, + mode='fan_out', + nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride): + norm_layer = self._norm_layer + upsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + upsample = nn.Sequential( + nn.ConvTranspose2d(self.inplanes, + planes * block.expansion, + kernel_size=2, + stride=stride, + bias=False), + norm_layer(planes * block.expansion), + ) + layers = [] + layers.append(block(self.inplanes, planes, stride, upsample)) + + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + feature_a = self.layer1(x) # 512*8*8->256*16*16 + feature_b = self.layer2(feature_a) # 256*16*16->128*32*32 + feature_c = self.layer3(feature_b) # 128*32*32->64*64*64 + return [feature_c, feature_b, feature_a] + + +class AttnBasicBlock(nn.Module): + expansion: int = 1 + + def __init__(self, + inplanes: int, + planes: int, + stride: int = 1, + downsample=None) -> None: + super(AttnBasicBlock, self).__init__() + + norm_layer = nn.BatchNorm2d + self.conv1 = nn.Conv2d(inplanes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.bn2 = norm_layer(planes) + self.stride = stride + self.downsample = downsample + + def forward(self, x: Tensor) -> Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + if self.downsample is not None: + identity = self.downsample(x) + out += identity + out = self.relu(out) + + return out + + +class BN_layer(nn.Module): + def __init__( + self, + block, + layers: int, + width_per_group: int = 64, + ): + super(BN_layer, self).__init__() + + self._norm_layer = nn.BatchNorm2d + self.base_width = width_per_group + self.inplanes = 256 * block.expansion + self.bn_layer = self._make_layer(block, 512, layers, stride=2) + + self.conv1 = nn.Conv2d(64 * block.expansion, + 128 * block.expansion, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.bn1 = self._norm_layer(128 * block.expansion) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(128 * block.expansion, + 256 * block.expansion, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.bn2 = self._norm_layer(256 * block.expansion) + self.conv3 = nn.Conv2d(128 * block.expansion, + 256 * block.expansion, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.bn3 = self._norm_layer(256 * block.expansion) + + self.conv4 = nn.Conv2d(1024 * block.expansion, + 512 * block.expansion, + kernel_size=1, + stride=1, + bias=False) + self.bn4 = self._norm_layer(512 * block.expansion) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, + mode='fan_out', + nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + 
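# Illustrative sketch: the decoder above mirrors a ResNet-18 encoder on
# 256x256 inputs, expanding the 512x8x8 bottleneck back into the three
# feature maps that reverse distillation compares against the encoder's.
import torch
decoder = De_ResNet18_256x256()
z = torch.randn(2, 512, 8, 8)
f64, f128, f256 = decoder(z)
# shapes: (2, 64, 64, 64), (2, 128, 32, 32), (2, 256, 16, 16)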
nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer( + self, + block, + planes: int, + blocks: int, + stride: int = 1, + ) -> nn.Sequential: + norm_layer = self._norm_layer + downsample = None + + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes * 3, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes * 3, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x: Tensor) -> Tensor: + l1 = self.relu( + self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x[0])))))) + l2 = self.relu(self.bn3(self.conv3(x[1]))) + feature = torch.cat([l1, l2, x[2]], 1) + output = self.bn_layer(feature) + + return output.contiguous() diff --git a/OpenOOD/openood/networks/densenet.py b/OpenOOD/openood/networks/densenet.py new file mode 100644 index 0000000000000000000000000000000000000000..26792eba7e5a29df2cb9103bb42dbf05da368c57 --- /dev/null +++ b/OpenOOD/openood/networks/densenet.py @@ -0,0 +1,181 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + def __init__(self, in_planes, out_planes, dropRate=0.0): + super(BasicBlock, self).__init__() + self.bn1 = nn.BatchNorm2d(in_planes) + self.relu = nn.ReLU(inplace=True) + self.conv1 = nn.Conv2d(in_planes, + out_planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.droprate = dropRate + + def forward(self, x): + out = self.conv1(self.relu(self.bn1(x))) + if self.droprate > 0: + out = F.dropout(out, p=self.droprate, training=self.training) + return torch.cat([x, out], 1) + + +class BottleneckBlock(nn.Module): + def __init__(self, in_planes, out_planes, dropRate=0.0): + super(BottleneckBlock, self).__init__() + inter_planes = out_planes * 4 + self.bn1 = nn.BatchNorm2d(in_planes) + self.relu = nn.ReLU(inplace=True) + self.conv1 = nn.Conv2d(in_planes, + inter_planes, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.bn2 = nn.BatchNorm2d(inter_planes) + self.conv2 = nn.Conv2d(inter_planes, + out_planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.droprate = dropRate + + def forward(self, x): + out = self.conv1(self.relu(self.bn1(x))) + if self.droprate > 0: + out = F.dropout(out, + p=self.droprate, + inplace=False, + training=self.training) + out = self.conv2(self.relu(self.bn2(out))) + if self.droprate > 0: + out = F.dropout(out, + p=self.droprate, + inplace=False, + training=self.training) + return torch.cat([x, out], 1) + + +class TransitionBlock(nn.Module): + def __init__(self, in_planes, out_planes, dropRate=0.0): + super(TransitionBlock, self).__init__() + self.bn1 = nn.BatchNorm2d(in_planes) + self.relu = nn.ReLU(inplace=True) + self.conv1 = nn.Conv2d(in_planes, + out_planes, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.droprate = dropRate + + def forward(self, x): + out = self.conv1(self.relu(self.bn1(x))) + if self.droprate > 0: + out = F.dropout(out, + p=self.droprate, + inplace=False, + training=self.training) + return F.avg_pool2d(out, 2) + + +class DenseBlock(nn.Module): + def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0): + super(DenseBlock, self).__init__() + self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, + 
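# Illustrative sketch: BN_layer fuses the three encoder scales into a single
# bottleneck -- x[0] is downsampled twice, x[1] once, then all three are
# concatenated at 16x16 and pushed through a strided residual stage.
import torch
bn = BN_layer(AttnBasicBlock, 2)
feats = [torch.randn(2, 64, 64, 64),
         torch.randn(2, 128, 32, 32),
         torch.randn(2, 256, 16, 16)]
bottleneck = bn(feats)  # (2, 512, 8, 8)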
dropRate) + + def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate): + layers = [] + for i in range(nb_layers): + layers.append( + block(in_planes + i * growth_rate, growth_rate, dropRate)) + return nn.Sequential(*layers) + + def forward(self, x): + return self.layer(x) + + +class DenseNet3(nn.Module): + def __init__(self, + depth=100, + growth_rate=12, + reduction=0.5, + bottleneck=True, + dropRate=0.0, + num_classes=10): + super(DenseNet3, self).__init__() + in_planes = 2 * growth_rate + n = (depth - 4) / 3 + if bottleneck == True: + n = n / 2 + block = BottleneckBlock + else: + block = BasicBlock + n = int(n) + # 1st conv before any dense block + self.conv1 = nn.Conv2d(3, + in_planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + # 1st block + self.block1 = DenseBlock(n, in_planes, growth_rate, block, dropRate) + in_planes = int(in_planes + n * growth_rate) + self.trans1 = TransitionBlock(in_planes, + int(math.floor(in_planes * reduction)), + dropRate=dropRate) + in_planes = int(math.floor(in_planes * reduction)) + # 2nd block + self.block2 = DenseBlock(n, in_planes, growth_rate, block, dropRate) + in_planes = int(in_planes + n * growth_rate) + self.trans2 = TransitionBlock(in_planes, + int(math.floor(in_planes * reduction)), + dropRate=dropRate) + in_planes = int(math.floor(in_planes * reduction)) + # 3rd block + self.block3 = DenseBlock(n, in_planes, growth_rate, block, dropRate) + in_planes = int(in_planes + n * growth_rate) + # global average pooling and classifier + self.bn1 = nn.BatchNorm2d(in_planes) + self.relu = nn.ReLU(inplace=True) + self.fc = nn.Linear(in_planes, num_classes) + self.in_planes = in_planes + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
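# Channel bookkeeping for the defaults (depth=100, growth_rate=12, bottleneck):
#   n = (100 - 4) / 3 = 32, halved to 16 bottleneck layers per dense block
#   in_planes: 24 -> 24 + 16*12 = 216 -> trans1 -> 108
#              108 + 16*12 = 300      -> trans2 -> 150
#              150 + 16*12 = 342      -> width of the final BN and fc
# Quick check (32x32 inputs, since forward uses avg_pool2d(feature5, 8)):
import torch
net = DenseNet3()
assert net.in_planes == 342
assert net(torch.randn(2, 3, 32, 32)).shape == (2, 10)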
/ n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + m.bias.data.zero_() + + def forward(self, x, return_feature=False): + feature1 = self.conv1(x) + feature2 = self.trans1(self.block1(feature1)) + feature3 = self.trans2(self.block2(feature2)) + feature4 = self.block3(feature3) + feature5 = self.relu(self.bn1(feature4)) + out = F.avg_pool2d(feature5, 8) + feature = out.view(-1, self.in_planes) + logits_cls = self.fc(feature) + feature_list = [ + feature, feature1, feature2, feature3, feature4, feature5 + ] + if return_feature: + return logits_cls, feature_list + else: + return logits_cls + + def get_fc(self): + fc = self.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() diff --git a/OpenOOD/openood/networks/dinov2.py b/OpenOOD/openood/networks/dinov2.py new file mode 100644 index 0000000000000000000000000000000000000000..ef5204062bfc22002443d8c417cda08a1237ca85 --- /dev/null +++ b/OpenOOD/openood/networks/dinov2.py @@ -0,0 +1,37 @@ +import torch +import torch.nn as nn + + +class DINOv2Wrapper(nn.Module): + def __init__(self, model): + super().__init__() + self.model = model + + def forward(self, x): + return self.model(x) + + def forward_threshold(self, x, threshold): + # Reshape and permute the input tensor + x = self._process_input(x) + n = x.shape[0] + + # Expand the class token to the full batch + batch_class_token = self.class_token.expand(n, -1, -1) + x = torch.cat([batch_class_token, x], dim=1) + + x = self.encoder(x) + + # Classifier "token" as used by standard language architectures + x = x[:, 0] + + feature = x.clip(max=threshold) + logits_cls = self.heads(feature) + + return logits_cls + + def get_fc(self): + fc = self.heads[0] + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + + def get_fc_layer(self): + return self.heads[0] diff --git a/OpenOOD/openood/networks/draem_net.py b/OpenOOD/openood/networks/draem_net.py new file mode 100644 index 0000000000000000000000000000000000000000..3f38300b7aa8c50aa691a2d6b7ab19f3df2630b1 --- /dev/null +++ b/OpenOOD/openood/networks/draem_net.py @@ -0,0 +1,331 @@ +import torch +import torch.nn as nn + + +class ReconstructiveSubNetwork(nn.Module): + def __init__(self, in_channels=3, out_channels=3, base_width=128): + super(ReconstructiveSubNetwork, self).__init__() + self.encoder = EncoderReconstructive(in_channels, base_width) + self.decoder = DecoderReconstructive(base_width, + out_channels=out_channels) + + def forward(self, x): + b5 = self.encoder(x) + output = self.decoder(b5) + return output + + +class DiscriminativeSubNetwork(nn.Module): + def __init__(self, + in_channels=3, + out_channels=3, + base_channels=64, + out_features=False): + super(DiscriminativeSubNetwork, self).__init__() + base_width = base_channels + self.encoder_segment = EncoderDiscriminative(in_channels, base_width) + self.decoder_segment = DecoderDiscriminative(base_width, + out_channels=out_channels) + # self.segment_act = torch.nn.Sigmoid() + self.out_features = out_features + + def forward(self, x): + b1, b2, b3, b4, b5, b6 = self.encoder_segment(x) + output_segment = self.decoder_segment(b1, b2, b3, b4, b5, b6) + if self.out_features: + return output_segment, b2, b3, b4, b5, b6 + else: + return output_segment + + +class EncoderDiscriminative(nn.Module): + def __init__(self, in_channels, base_width): + super(EncoderDiscriminative, self).__init__() + self.block1 = nn.Sequential( + nn.Conv2d(in_channels, base_width, kernel_size=3, padding=1), + 
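# Note on DINOv2Wrapper above: forward_threshold and get_fc reference
# _process_input, class_token, encoder and heads, none of which the wrapper
# defines -- the names match torchvision's VisionTransformer attributes. A
# minimal sketch (an assumption, not this repo's code) that makes those
# lookups resolve is to fall back to the wrapped model:
class DelegatingDINOv2Wrapper(DINOv2Wrapper):
    def __getattr__(self, name):
        try:
            return super().__getattr__(name)  # registered modules/params first
        except AttributeError:
            return getattr(self.model, name)  # then the wrapped model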
nn.BatchNorm2d(base_width), nn.ReLU(inplace=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True)) + self.mp1 = nn.Sequential(nn.MaxPool2d(2)) + self.block2 = nn.Sequential( + nn.Conv2d(base_width, base_width * 2, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 2), nn.ReLU(inplace=True), + nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True)) + self.mp2 = nn.Sequential(nn.MaxPool2d(2)) + self.block3 = nn.Sequential( + nn.Conv2d(base_width * 2, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True)) + self.mp3 = nn.Sequential(nn.MaxPool2d(2)) + self.block4 = nn.Sequential( + nn.Conv2d(base_width * 4, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True)) + self.mp4 = nn.Sequential(nn.MaxPool2d(2)) + self.block5 = nn.Sequential( + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True)) + + self.mp5 = nn.Sequential(nn.MaxPool2d(2)) + self.block6 = nn.Sequential( + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True)) + + def forward(self, x): + b1 = self.block1(x) + mp1 = self.mp1(b1) + b2 = self.block2(mp1) + mp2 = self.mp3(b2) + b3 = self.block3(mp2) + mp3 = self.mp3(b3) + b4 = self.block4(mp3) + mp4 = self.mp4(b4) + b5 = self.block5(mp4) + mp5 = self.mp5(b5) + b6 = self.block6(mp5) + return b1, b2, b3, b4, b5, b6 + + +class DecoderDiscriminative(nn.Module): + def __init__(self, base_width, out_channels=1): + super(DecoderDiscriminative, self).__init__() + + self.up_b = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True)) + self.db_b = nn.Sequential( + nn.Conv2d(base_width * (8 + 8), + base_width * 8, + kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True)) + + self.up1 = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width * 8, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True)) + self.db1 = nn.Sequential( + nn.Conv2d(base_width * (4 + 8), + base_width * 4, + kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True)) + + self.up2 = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width * 4, base_width * 2, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 2), + 
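# Note on EncoderDiscriminative.forward above: level 2 is pooled with
# `mp2 = self.mp3(b2)` rather than self.mp2. Since every mp* module is an
# identical, parameter-free nn.MaxPool2d(2), the output is unchanged, but the
# dedicated self.mp2 goes unused; the same pattern appears again in
# EncoderReconstructive.forward below.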
nn.ReLU(inplace=True)) + self.db2 = nn.Sequential( + nn.Conv2d(base_width * (2 + 4), + base_width * 2, + kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True)) + + self.up3 = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width * 2, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True)) + self.db3 = nn.Sequential( + nn.Conv2d(base_width * (2 + 1), + base_width, + kernel_size=3, + padding=1), nn.BatchNorm2d(base_width), + nn.ReLU(inplace=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True)) + + self.up4 = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True)) + self.db4 = nn.Sequential( + nn.Conv2d(base_width * 2, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True)) + + self.fin_out = nn.Sequential( + nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1)) + + def forward(self, b1, b2, b3, b4, b5, b6): + up_b = self.up_b(b6) + cat_b = torch.cat((up_b, b5), dim=1) + db_b = self.db_b(cat_b) + + up1 = self.up1(db_b) + cat1 = torch.cat((up1, b4), dim=1) + db1 = self.db1(cat1) + + up2 = self.up2(db1) + cat2 = torch.cat((up2, b3), dim=1) + db2 = self.db2(cat2) + + up3 = self.up3(db2) + cat3 = torch.cat((up3, b2), dim=1) + db3 = self.db3(cat3) + + up4 = self.up4(db3) + cat4 = torch.cat((up4, b1), dim=1) + db4 = self.db4(cat4) + + out = self.fin_out(db4) + return out + + +class EncoderReconstructive(nn.Module): + def __init__(self, in_channels, base_width): + super(EncoderReconstructive, self).__init__() + self.block1 = nn.Sequential( + nn.Conv2d(in_channels, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True)) + self.mp1 = nn.Sequential(nn.MaxPool2d(2)) + self.block2 = nn.Sequential( + nn.Conv2d(base_width, base_width * 2, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width * 2), nn.ReLU(inplace=True), + nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True)) + self.mp2 = nn.Sequential(nn.MaxPool2d(2)) + self.block3 = nn.Sequential( + nn.Conv2d(base_width * 2, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True)) + self.mp3 = nn.Sequential(nn.MaxPool2d(2)) + self.block4 = nn.Sequential( + nn.Conv2d(base_width * 4, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True)) + self.mp4 = nn.Sequential(nn.MaxPool2d(2)) + self.block5 = nn.Sequential( + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 
8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True)) + + def forward(self, x): + b1 = self.block1(x) + mp1 = self.mp1(b1) + b2 = self.block2(mp1) + mp2 = self.mp3(b2) + b3 = self.block3(mp2) + mp3 = self.mp3(b3) + b4 = self.block4(mp3) + mp4 = self.mp4(b4) + b5 = self.block5(mp4) + return b5 + + +class DecoderReconstructive(nn.Module): + def __init__(self, base_width, out_channels=1): + super(DecoderReconstructive, self).__init__() + + self.up1 = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True)) + self.db1 = nn.Sequential( + nn.Conv2d(base_width * 8, base_width * 8, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 8), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 8, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True)) + + self.up2 = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True)) + self.db2 = nn.Sequential( + nn.Conv2d(base_width * 4, base_width * 4, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 4), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 4, base_width * 2, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True)) + + self.up3 = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True)) + # cat with base*1 + self.db3 = nn.Sequential( + nn.Conv2d(base_width * 2, base_width * 2, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 2), + nn.ReLU(inplace=True), + nn.Conv2d(base_width * 2, base_width * 1, kernel_size=3, + padding=1), nn.BatchNorm2d(base_width * 1), + nn.ReLU(inplace=True)) + + self.up4 = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True)) + self.db4 = nn.Sequential( + nn.Conv2d(base_width * 1, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True), + nn.Conv2d(base_width, base_width, kernel_size=3, padding=1), + nn.BatchNorm2d(base_width), nn.ReLU(inplace=True)) + + self.fin_out = nn.Sequential( + nn.Conv2d(base_width, out_channels, kernel_size=3, padding=1)) + # self.fin_out = nn.Conv2d( + # base_width, out_channels, kernel_size=3, adding=1) + + def forward(self, b5): + up1 = self.up1(b5) + db1 = self.db1(up1) + + up2 = self.up2(db1) + db2 = self.db2(up2) + + up3 = self.up3(db2) + db3 = self.db3(up3) + + up4 = self.up4(db3) + db4 = self.db4(up4) + + out = self.fin_out(db4) + return out diff --git a/OpenOOD/openood/networks/dropout_net.py b/OpenOOD/openood/networks/dropout_net.py new file mode 100644 index 0000000000000000000000000000000000000000..2a9eefc9c3ac9856696ae9119d598c6ea694c514 --- /dev/null +++ b/OpenOOD/openood/networks/dropout_net.py @@ -0,0 +1,22 @@ +import torch.nn as nn +import torch.nn.functional as F + + +class DropoutNet(nn.Module): + def __init__(self, backbone, dropout_p): + super(DropoutNet, self).__init__() + self.backbone = backbone + self.dropout_p = dropout_p + + def forward(self, x, use_dropout=True): + if use_dropout: + return 
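# Illustrative sketch of how the two DRAEM sub-networks are chained at test
# time (tensor sizes and variable names are assumptions, not from this file):
import torch
import torch.nn.functional as F
rec_net = ReconstructiveSubNetwork(in_channels=3, out_channels=3, base_width=128)
seg_net = DiscriminativeSubNetwork(in_channels=6, out_channels=2, base_channels=64)
img = torch.randn(1, 3, 256, 256)
rec = rec_net(img)                                   # autoencoder reconstruction
mask_logits = seg_net(torch.cat([rec, img], dim=1))  # (1, 2, 256, 256)
anomaly_map = F.softmax(mask_logits, dim=1)[:, 1]    # per-pixel anomaly score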
self.forward_with_dropout(x) + else: + return self.backbone(x) + + def forward_with_dropout(self, x): + _, feature = self.backbone(x, return_feature=True) + feature = F.dropout2d(feature, self.dropout_p, training=True) + logits_cls = self.backbone.fc(feature) + + return logits_cls diff --git a/OpenOOD/openood/networks/dsvdd_net.py b/OpenOOD/openood/networks/dsvdd_net.py new file mode 100644 index 0000000000000000000000000000000000000000..85dd70983ed7fd7a4a25673707d20dc8098bcf4e --- /dev/null +++ b/OpenOOD/openood/networks/dsvdd_net.py @@ -0,0 +1,275 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class MNIST_LeNet(nn.Module): + def __init__(self): + super().__init__() + + self.rep_dim = 32 + self.pool = nn.MaxPool2d(2, 2) + + self.conv1 = nn.Conv2d(1, 8, 5, bias=False, padding=2) + self.bn1 = nn.BatchNorm2d(8, eps=1e-04, affine=False) + self.conv2 = nn.Conv2d(8, 4, 5, bias=False, padding=2) + self.bn2 = nn.BatchNorm2d(4, eps=1e-04, affine=False) + self.fc1 = nn.Linear(4 * 7 * 7, self.rep_dim, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pool(F.leaky_relu(self.bn1(x))) + x = self.conv2(x) + x = self.pool(F.leaky_relu(self.bn2(x))) + x = x.view(x.size(0), -1) + x = self.fc1(x) + return x + + +class CIFAR10_LeNet(nn.Module): + def __init__(self): + super().__init__() + + self.rep_dim = 128 + self.pool = nn.MaxPool2d(2, 2) + + self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2) + self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False) + self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2) + self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False) + self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2) + self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False) + self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pool(F.leaky_relu(self.bn2d1(x))) + x = self.conv2(x) + x = self.pool(F.leaky_relu(self.bn2d2(x))) + x = self.conv3(x) + x = self.pool(F.leaky_relu(self.bn2d3(x))) + x = x.view(x.size(0), -1) + x = self.fc1(x) + return x + + +class CIFAR10_LeNet_ELU(nn.Module): + def __init__(self): + super().__init__() + + self.rep_dim = 128 + self.pool = nn.MaxPool2d(2, 2) + + self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2) + self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False) + self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2) + self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False) + self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2) + self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False) + self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pool(F.elu(self.bn2d1(x))) + x = self.conv2(x) + x = self.pool(F.elu(self.bn2d2(x))) + x = self.conv3(x) + x = self.pool(F.elu(self.bn2d3(x))) + x = x.view(x.size(0), -1) + x = self.fc1(x) + return x + + +def build_network(net_type): + net = None + + if net_type == 'mnist_LeNet': + net = MNIST_LeNet() + + if net_type == 'cifar10_LeNet': + net = CIFAR10_LeNet() + + if net_type == 'cifar10_LeNet_ELU': + net = CIFAR10_LeNet_ELU() + + return net + + +class MNIST_LeNet_Autoencoder(nn.Module): + def __init__(self): + super().__init__() + + self.rep_dim = 32 + self.pool = nn.MaxPool2d(2, 2) + + # Encoder (must match the Deep SVDD network above) + self.conv1 = nn.Conv2d(1, 8, 5, bias=False, padding=2) + self.bn1 = nn.BatchNorm2d(8, eps=1e-04, affine=False) + self.conv2 = nn.Conv2d(8, 4, 5, bias=False, padding=2) + self.bn2 = nn.BatchNorm2d(4, 
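# Illustrative sketch: DropoutNet serves Monte-Carlo dropout, averaging the
# predictive distribution over several stochastic passes (F.dropout2d above
# runs with training=True even at eval time; the pass count is arbitrary):
import torch
import torch.nn.functional as F

@torch.no_grad()
def mc_dropout_probs(net, x, passes=10):
    probs = [F.softmax(net(x, use_dropout=True), dim=1) for _ in range(passes)]
    return torch.stack(probs).mean(dim=0)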
eps=1e-04, affine=False) + self.fc1 = nn.Linear(4 * 7 * 7, self.rep_dim, bias=False) + + # Decoder + self.deconv1 = nn.ConvTranspose2d(2, 4, 5, bias=False, padding=2) + self.bn3 = nn.BatchNorm2d(4, eps=1e-04, affine=False) + self.deconv2 = nn.ConvTranspose2d(4, 8, 5, bias=False, padding=3) + self.bn4 = nn.BatchNorm2d(8, eps=1e-04, affine=False) + self.deconv3 = nn.ConvTranspose2d(8, 1, 5, bias=False, padding=2) + + def forward(self, x): + x = self.conv1(x) + x = self.pool(F.leaky_relu(self.bn1(x))) + x = self.conv2(x) + x = self.pool(F.leaky_relu(self.bn2(x))) + x = x.view(x.size(0), -1) + x = self.fc1(x) + x = x.view(x.size(0), int(self.rep_dim / 16), 4, 4) + x = F.interpolate(F.leaky_relu(x), scale_factor=2) + x = self.deconv1(x) + x = F.interpolate(F.leaky_relu(self.bn3(x)), scale_factor=2) + x = self.deconv2(x) + x = F.interpolate(F.leaky_relu(self.bn4(x)), scale_factor=2) + x = self.deconv3(x) + x = torch.sigmoid(x) + + return x + + +class CIFAR10_LeNet_Autoencoder(nn.Module): + def __init__(self): + super().__init__() + + self.rep_dim = 128 + self.pool = nn.MaxPool2d(2, 2) + + # Encoder (must match the Deep SVDD network above) + self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.conv1.weight, + gain=nn.init.calculate_gain('leaky_relu')) + self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False) + self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.conv2.weight, + gain=nn.init.calculate_gain('leaky_relu')) + self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False) + self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.conv3.weight, + gain=nn.init.calculate_gain('leaky_relu')) + self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False) + self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False) + self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False) + + # Decoder + self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), + 128, + 5, + bias=False, + padding=2) + nn.init.xavier_uniform_(self.deconv1.weight, + gain=nn.init.calculate_gain('leaky_relu')) + self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False) + self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.deconv2.weight, + gain=nn.init.calculate_gain('leaky_relu')) + self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False) + self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.deconv3.weight, + gain=nn.init.calculate_gain('leaky_relu')) + self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False) + self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.deconv4.weight, + gain=nn.init.calculate_gain('leaky_relu')) + + def forward(self, x): + x = self.conv1(x) + x = self.pool(F.leaky_relu(self.bn2d1(x))) + x = self.conv2(x) + x = self.pool(F.leaky_relu(self.bn2d2(x))) + x = self.conv3(x) + x = self.pool(F.leaky_relu(self.bn2d3(x))) + x = x.view(x.size(0), -1) + x = self.bn1d(self.fc1(x)) + x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4) + x = F.leaky_relu(x) + x = self.deconv1(x) + x = F.interpolate(F.leaky_relu(self.bn2d4(x)), scale_factor=2) + x = self.deconv2(x) + x = F.interpolate(F.leaky_relu(self.bn2d5(x)), scale_factor=2) + x = self.deconv3(x) + x = F.interpolate(F.leaky_relu(self.bn2d6(x)), scale_factor=2) + x = self.deconv4(x) + x = torch.sigmoid(x) + return x + + +class CIFAR10_LeNet_ELU_Autoencoder(nn.Module): + def __init__(self): + super().__init__() + 
+ self.rep_dim = 128 + self.pool = nn.MaxPool2d(2, 2) + + # Encoder (must match the Deep SVDD network above) + self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.conv1.weight) + self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False) + self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.conv2.weight) + self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False) + self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.conv3.weight) + self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False) + self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False) + self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False) + + # Decoder + self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), + 128, + 5, + bias=False, + padding=2) + nn.init.xavier_uniform_(self.deconv1.weight) + self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False) + self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.deconv2.weight) + self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False) + self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.deconv3.weight) + self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False) + self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2) + nn.init.xavier_uniform_(self.deconv4.weight) + + def forward(self, x): + x = self.conv1(x) + x = self.pool(F.elu(self.bn2d1(x))) + x = self.conv2(x) + x = self.pool(F.elu(self.bn2d2(x))) + x = self.conv3(x) + x = self.pool(F.elu(self.bn2d3(x))) + x = x.view(x.size(0), -1) + x = self.bn1d(self.fc1(x)) + x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4) + x = F.elu(x) + x = self.deconv1(x) + x = F.interpolate(F.elu(self.bn2d4(x)), scale_factor=2) + x = self.deconv2(x) + x = F.interpolate(F.elu(self.bn2d5(x)), scale_factor=2) + x = self.deconv3(x) + x = F.interpolate(F.elu(self.bn2d6(x)), scale_factor=2) + x = self.deconv4(x) + x = torch.sigmoid(x) + return x + + +def get_Autoencoder(net_type): + ae_net = None + + if net_type == 'mnist_LeNet': + ae_net = MNIST_LeNet_Autoencoder() + + if net_type == 'cifar10_LeNet': + ae_net = CIFAR10_LeNet_Autoencoder() + + if net_type == 'cifar10_LeNet_ELU': + ae_net = CIFAR10_LeNet_ELU_Autoencoder() + + return ae_net diff --git a/OpenOOD/openood/networks/godin_net.py b/OpenOOD/openood/networks/godin_net.py new file mode 100644 index 0000000000000000000000000000000000000000..a6d73da5449fdb5abd80acaee0fb5661ffd3990a --- /dev/null +++ b/OpenOOD/openood/networks/godin_net.py @@ -0,0 +1,115 @@ +import torch +import torch.nn as nn + + +def norm(x): + norm = torch.norm(x, p=2, dim=1) + x = x / (norm.expand(1, -1).t() + .0001) + return x + + +class CosineDeconf(nn.Module): + def __init__(self, in_features, num_classes): + super(CosineDeconf, self).__init__() + + self.h = nn.Linear(in_features, num_classes, bias=False) + self.init_weights() + + def init_weights(self): + nn.init.kaiming_normal_(self.h.weight.data, nonlinearity='relu') + + def forward(self, x): + x = norm(x) + w = norm(self.h.weight) + + ret = (torch.matmul(x, w.T)) + return ret + + +class EuclideanDeconf(nn.Module): + def __init__(self, in_features, num_classes): + super(EuclideanDeconf, self).__init__() + + self.h = nn.Linear(in_features, num_classes, bias=False) + self.init_weights() + + def init_weights(self): + nn.init.kaiming_normal_(self.h.weight.data, nonlinearity='relu') + + def forward(self, x): + + # size: (batch, latent, 1) + x = 
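# Illustrative sketch of the Deep SVDD score these encoders serve: a sample is
# scored by its squared distance to a center c in representation space. In the
# original method c is the mean training embedding; the batch here is a stand-in.
import torch
net = build_network('cifar10_LeNet')
z = net(torch.randn(8, 3, 32, 32))   # (8, 128) representations
c = z.mean(dim=0).detach()           # placeholder center
scores = ((z - c) ** 2).sum(dim=1)   # larger distance => more anomalous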
x.unsqueeze(2) + + # size: (1, latent, num_classes) + h = self.h.weight.T.unsqueeze(0) + ret = -((x - h).pow(2)).mean(1) + return ret + + +class InnerDeconf(nn.Module): + def __init__(self, in_features, num_classes): + super(InnerDeconf, self).__init__() + + self.h = nn.Linear(in_features, num_classes) + self.init_weights() + + def init_weights(self): + nn.init.kaiming_normal_(self.h.weight.data, nonlinearity='relu') + self.h.bias.data = torch.zeros(size=self.h.bias.size()) + + def forward(self, x): + return self.h(x) + + +class GodinNet(nn.Module): + def __init__(self, + backbone, + feature_size, + num_classes, + similarity_measure='cosine'): + super(GodinNet, self).__init__() + + h_dict = { + 'cosine': CosineDeconf, + 'inner': InnerDeconf, + 'euclid': EuclideanDeconf + } + + self.num_classes = num_classes + + self.backbone = backbone + if hasattr(self.backbone, 'fc'): + # remove fc otherwise ddp will + # report unused params + self.backbone.fc = nn.Identity() + + self.h = h_dict[similarity_measure](feature_size, num_classes) + + self.g = nn.Sequential(nn.Linear(feature_size, 1), nn.BatchNorm1d(1), + nn.Sigmoid()) + + self.softmax = nn.Softmax() + + def forward(self, x, inference=False, score_func='h'): + _, feature = self.backbone(x, return_feature=True) + + numerators = self.h(feature) + + denominators = self.g(feature) + + # calculate the logits results + quotients = numerators / denominators + + # logits, numerators, and denominators + if inference: + if score_func == 'h': + return numerators + elif score_func == 'g': + return denominators + else: + # maybe generate an error instead + print('Invalid score function, using h instead') + return numerators + else: + return quotients diff --git a/OpenOOD/openood/networks/lenet.py b/OpenOOD/openood/networks/lenet.py new file mode 100644 index 0000000000000000000000000000000000000000..cf6a62f1e59e1e8b0ebe670e3756c3f9ae1abae6 --- /dev/null +++ b/OpenOOD/openood/networks/lenet.py @@ -0,0 +1,62 @@ +import logging + +import torch.nn as nn + +logger = logging.getLogger(__name__) + + +class LeNet(nn.Module): + def __init__(self, num_classes, num_channel=3): + super(LeNet, self).__init__() + self.num_classes = num_classes + self.feature_size = 84 + self.block1 = nn.Sequential( + nn.Conv2d(in_channels=num_channel, + out_channels=6, + kernel_size=5, + stride=1, + padding=2), nn.ReLU(), nn.MaxPool2d(kernel_size=2)) + + self.block2 = nn.Sequential( + nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1), + nn.ReLU(), nn.MaxPool2d(kernel_size=2)) + + self.block3 = nn.Sequential( + nn.Conv2d(in_channels=16, + out_channels=120, + kernel_size=5, + stride=1), nn.ReLU()) + + self.classifier1 = nn.Linear(in_features=120, out_features=84) + self.relu = nn.ReLU() + self.fc = nn.Linear(in_features=84, out_features=num_classes) + + def get_fc(self): + fc = self.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + + def forward(self, x, return_feature=False, return_feature_list=False): + feature1 = self.block1(x) + feature2 = self.block2(feature1) + feature3 = self.block3(feature2) + feature3 = feature3.view(feature3.shape[0], -1) + feature = self.relu(self.classifier1(feature3)) + logits_cls = self.fc(feature) + feature_list = [feature1, feature2, feature3, feature] + if return_feature: + return logits_cls, feature + elif return_feature_list: + return logits_cls, feature_list + else: + return logits_cls + + def forward_threshold(self, x, threshold): + feature1 = self.block1(x) + feature2 = self.block2(feature1) + feature3 = 
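# Illustrative sketch: GODIN trains on the quotient h/g but scores at test
# time with a single head. Assumes ResNet18_32x32 from this repo
# (feature_size 512, return_feature=True supported). Note that self.softmax
# above is built without a dim= argument and is never used in forward.
import torch
net = GodinNet(backbone=ResNet18_32x32(num_classes=10),
               feature_size=512, num_classes=10)
x = torch.randn(4, 3, 32, 32)
train_logits = net(x)                                        # h/g for CE loss
ood_scores = net(x, inference=True, score_func='h').max(1).values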
self.block3(feature2) + feature3 = feature3.view(feature3.shape[0], -1) + feature = self.relu(self.classifier1(feature3)) + feature = feature.clip(max=threshold) + logits_cls = self.fc(feature) + + return logits_cls diff --git a/OpenOOD/openood/networks/mcd_net.py b/OpenOOD/openood/networks/mcd_net.py new file mode 100644 index 0000000000000000000000000000000000000000..98bfaf60b3c1365b51539effe1d40cae289d90b7 --- /dev/null +++ b/OpenOOD/openood/networks/mcd_net.py @@ -0,0 +1,28 @@ +import torch.nn as nn + + +class MCDNet(nn.Module): + def __init__(self, backbone, num_classes): + super(MCDNet, self).__init__() + + self.backbone = backbone + try: + feature_size = backbone.feature_size + except AttributeError: + feature_size = backbone.module.feature_size + + self.fc1 = nn.Linear(feature_size, num_classes) + self.fc2 = nn.Linear(feature_size, num_classes) + + # test conf + def forward(self, x, return_double=False): + + _, feature = self.backbone(x, return_feature=True) + + logits1 = self.fc1(feature) + logits2 = self.fc2(feature) + + if return_double: + return logits1, logits2 + else: + return logits1 diff --git a/OpenOOD/openood/networks/mmcls_featext.py b/OpenOOD/openood/networks/mmcls_featext.py new file mode 100644 index 0000000000000000000000000000000000000000..69636187e53daf5e2479feefe287dc36eb30ea74 --- /dev/null +++ b/OpenOOD/openood/networks/mmcls_featext.py @@ -0,0 +1,10 @@ +from mmcls.models import CLASSIFIERS, ImageClassifier + + +@CLASSIFIERS.register_module() +class ImageClassifierWithReturnFeature(ImageClassifier): + def forward(self, x, *args, **kwargs): + if 'return_feature' in kwargs: + return self.backbone(x)[0][-1] + else: + return super().forward(x, *args, **kwargs) diff --git a/OpenOOD/openood/networks/model_bronze.py b/OpenOOD/openood/networks/model_bronze.py new file mode 100644 index 0000000000000000000000000000000000000000..fe961cfb71a98a317ada19f2f6bdf0010ff0db8b --- /dev/null +++ b/OpenOOD/openood/networks/model_bronze.py @@ -0,0 +1,214 @@ +import torch.nn as nn +import torch +import torch.nn.functional as F + + +class BasicConv(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False): + super(BasicConv, self).__init__() + self.out_channels = out_planes + self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, + stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) + self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, + momentum=0.01, affine=True) if bn else None + self.relu = nn.ReLU() if relu else None + + def forward(self, x): + x = self.conv(x) + if self.bn is not None: + x = self.bn(x) + if self.relu is not None: + x = self.relu(x) + return x + + +class AKG(nn.Module): + def __init__(self, dataset, model, feature_size): + super(AKG, self).__init__() + + self.features = nn.Sequential(*list(model.children())[:-2]) + self.pooling = nn.AdaptiveAvgPool2d(1) + self.relu = nn.ReLU() + self.num_ftrs = 2048 * 1 * 1 + + self.conv_block0 = nn.Sequential( + BasicConv(self.num_ftrs, feature_size, kernel_size=1, stride=1, padding=0, relu=True), + BasicConv(feature_size, self.num_ftrs, kernel_size=3, stride=1, padding=1, relu=True) + ) + + self.conv_block1 = nn.Sequential( + BasicConv(self.num_ftrs, feature_size, kernel_size=1, stride=1, padding=0, relu=True), + BasicConv(feature_size, self.num_ftrs, kernel_size=3, stride=1, padding=1, relu=True) + ) + self.conv_block2 = nn.Sequential( + BasicConv(self.num_ftrs, feature_size, kernel_size=1, stride=1, 
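# Illustrative sketch: forward_threshold above implements ReAct-style
# activation clipping. The threshold is usually a high percentile of
# penultimate activations collected on in-distribution data; the loader
# structure and the 90th percentile below are assumptions.
import numpy as np
import torch

@torch.no_grad()
def estimate_threshold(net, loader, q=90.0):
    acts = [net(x, return_feature=True)[1].cpu().numpy().ravel()
            for x, _ in loader]
    return float(np.percentile(np.concatenate(acts), q))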
padding=0, relu=True),
+            BasicConv(feature_size, self.num_ftrs, kernel_size=3, stride=1, padding=1, relu=True)
+        )
+
+        self.conv_block_shape = nn.Sequential(
+            BasicConv(self.num_ftrs, feature_size, kernel_size=1, stride=1, padding=0, relu=True),
+            BasicConv(feature_size, self.num_ftrs, kernel_size=3, stride=1, padding=1, relu=True)
+        )
+
+        self.conv_block_att = nn.Sequential(
+            BasicConv(self.num_ftrs, feature_size, kernel_size=1, stride=1, padding=0, relu=True),
+            BasicConv(feature_size, self.num_ftrs, kernel_size=3, stride=1, padding=1, relu=True)
+        )
+
+        self.fc0 = nn.Sequential(
+            nn.BatchNorm1d(self.num_ftrs),
+            nn.Linear(self.num_ftrs, feature_size),
+            nn.BatchNorm1d(feature_size),
+            nn.ELU(inplace=True),
+            nn.Linear(feature_size, 512)
+        )
+
+        self.fc1 = nn.Sequential(
+            nn.BatchNorm1d(self.num_ftrs),
+            nn.Linear(self.num_ftrs, feature_size),
+            nn.BatchNorm1d(feature_size),
+            nn.ELU(inplace=True),
+            nn.Linear(feature_size, 512)
+        )
+
+        self.fc2 = nn.Sequential(
+            nn.BatchNorm1d(self.num_ftrs),
+            nn.Linear(self.num_ftrs, feature_size),
+            nn.BatchNorm1d(feature_size),
+            nn.ELU(inplace=True),
+            nn.Linear(feature_size, 512)
+        )
+
+        self.fc_shape = nn.Sequential(
+            nn.BatchNorm1d(self.num_ftrs),
+            nn.Linear(self.num_ftrs, feature_size),
+            nn.BatchNorm1d(feature_size),
+            nn.ELU(inplace=True),
+            nn.Linear(feature_size, 512)
+        )
+
+        self.fc_att = nn.Sequential(
+            nn.BatchNorm1d(self.num_ftrs),
+            nn.Linear(self.num_ftrs, feature_size),
+            nn.BatchNorm1d(feature_size),
+            nn.ELU(inplace=True),
+            nn.Linear(feature_size, 512)
+        )
+
+        if dataset == 'bronze':
+            self.classifier_0 = nn.Sequential(
+                nn.Linear(512, 2),
+                nn.Sigmoid()
+            )
+            self.classifier_1 = nn.Sequential(
+                nn.Linear(512, 4),
+                nn.Sigmoid()
+            )
+            self.classifier_2 = nn.Sequential(
+                nn.Linear(512, 11),
+                nn.Sigmoid()
+            )
+            self.classifier_2_1 = nn.Sequential(
+                nn.Linear(512, 11),
+            )
+
+            self.classifier_shape = nn.Sequential(
+                nn.Linear(512, 35),
+            )
+            self.classifier_shape_2 = nn.Sequential(
+                nn.Linear(512, 35),
+                nn.Sigmoid()
+            )
+
+            self.classifier_att = nn.Sequential(
+                nn.Linear(512, 149),
+                nn.Sigmoid()
+            )
+
+    def forward(self, x, return_feature=False, return_feature_list=False):
+        feature_outputs = []
+
+        def feature_hook(module, input, output):
+            # only record the outputs of the last five layers
+            if len(feature_outputs) < 5:
+                feature_outputs.append(output)
+
+        # dynamically register hooks on the last five children of self.features
+        layer_hooks = []
+        for i, layer in enumerate(list(self.features.children())[-5:]):
+            hook = layer.register_forward_hook(feature_hook)
+            layer_hooks.append(hook)
+
+        x = self.features(x)
+        x_cat = self.conv_block0(x)
+        x_order = self.conv_block1(x)
+        x_species = self.conv_block2(x)
+        shape_feature = self.conv_block_shape(x)
+        x_att = self.conv_block_att(x)
+
+        x_cat_fc = self.pooling(x_cat)
+        x_cat_fc = x_cat_fc.view(x_cat_fc.size(0), -1)
+        x_cat_fc = self.fc0(x_cat_fc)
+
+        x_order_fc = self.pooling(x_order)
+        x_order_fc = x_order_fc.view(x_order_fc.size(0), -1)
+        x_order_fc = self.fc1(x_order_fc)
+
+        x_species_fc = self.pooling(x_species)
+        x_species_fc = x_species_fc.view(x_species_fc.size(0), -1)
+        x_species_fc = self.fc2(x_species_fc)
+
+        shape_feature = self.pooling(shape_feature)
+        shape_feature = shape_feature.view(shape_feature.size(0), -1)
+        shape_feature = self.fc_shape(shape_feature)
+
+        x_att_fc = self.pooling(x_att)
+        x_att_fc = x_att_fc.view(x_att_fc.size(0), -1)
+        x_att_fc = self.fc_att(x_att_fc)
+
+        # before modification
+        y_cat_sig = self.classifier_0(self.relu(x_cat_fc + x_order_fc.detach() + x_species_fc.detach().clone()))
+        y_order_sig = self.classifier_1(self.relu(x_cat_fc + x_order_fc + x_species_fc.detach().clone()))
+        y_species_sig = self.classifier_2(self.relu(x_cat_fc + x_order_fc + x_species_fc))
+        y_species_sof = self.classifier_2_1(self.relu(x_cat_fc + x_order_fc + x_species_fc))
+
+        # after modification (alternative kept for reference)
+        # y_cat_sig = self.classifier_0(self.relu(x_cat_fc + x_order_fc.detach().clone() + x_species_fc.detach().clone()))
+        # y_order_sig = self.classifier_1(self.relu(x_order_fc + x_species_fc.detach().clone()))
+        # y_species_sig = self.classifier_2(self.relu(x_order_fc + x_species_fc))
+        # y_species_sof = self.classifier_2_1(self.relu(x_order_fc + x_species_fc))
+
+        shape_sof = self.classifier_shape(self.relu(shape_feature))
+        shape_sig = self.classifier_shape_2(self.relu(shape_feature))
+        y_att_sig = self.classifier_att(self.relu(x_att_fc))
+
+        if return_feature_list:
+            # remove the previously registered hooks
+            for hook in layer_hooks:
+                hook.remove()
+            feature_outputs[-1] = self.pooling(feature_outputs[-1])
+            return y_species_sof, feature_outputs
+        for hook in layer_hooks:
+            hook.remove()
+
+        if return_feature:
+            return y_species_sof, x_cat_fc + x_order_fc + x_species_fc
+        return y_species_sof
+        # return y_cat_sig, y_order_sig, y_species_sof, y_species_sig, shape_sof, shape_sig, y_att_sig
+
+    def get_fc(self):
+        fc = self.classifier_2_1[0]
+        return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy()
+
+    def get_fc_layer(self):
+        return self.classifier_2_1[0]
diff --git a/OpenOOD/openood/networks/net_utils_.py b/OpenOOD/openood/networks/net_utils_.py
new file mode 100644
index 0000000000000000000000000000000000000000..56e8ef48df39bc0afb8ccb0940f91161ca23af0a
--- /dev/null
+++ b/OpenOOD/openood/networks/net_utils_.py
@@ -0,0 +1,274 @@
+from types import MethodType
+
+import mmcv
+import numpy as np
+import torch
+import torch.backends.cudnn as cudnn
+import torch.nn as nn
+from mmcls.apis import init_model
+
+import openood.utils.comm as comm
+
+from .bit import KNOWN_MODELS
+from .conf_branch_net import ConfBranchNet
+from .csi_net import CSINet
+from .de_resnet18_256x256 import AttnBasicBlock, BN_layer, De_ResNet18_256x256
+from .densenet import DenseNet3
+from .draem_net import DiscriminativeSubNetwork, ReconstructiveSubNetwork
+from .dropout_net import DropoutNet
+from .dsvdd_net import build_network
+from .godin_net import GodinNet
+from .lenet import LeNet
+from .mcd_net import MCDNet
+from .openmax_net import OpenMax
+from .patchcore_net import PatchcoreNet
+from .projection_net import ProjectionNet
+from .react_net import ReactNet
+from .resnet18_32x32 import ResNet18_32x32
+from .resnet18_64x64 import ResNet18_64x64
+from .resnet18_224x224 import ResNet18_224x224
+from .resnet18_256x256 import ResNet18_256x256
+from .resnet50 import ResNet50
+from .udg_net import UDGNet
+from .wrn import WideResNet
+
+
+def get_network(network_config):
+
+    num_classes = network_config.num_classes
+
+    if network_config.name == 'resnet18_32x32':
+        net = ResNet18_32x32(num_classes=num_classes)
+
+    elif network_config.name == 'resnet18_32x32_changed':
+        net = ResNet18_256x256(num_classes=num_classes)
+
+    elif network_config.name == 'resnet18_64x64':
+        net = ResNet18_64x64(num_classes=num_classes)
+
+    elif network_config.name == 'resnet18_224x224':
+        net = ResNet18_224x224(num_classes=num_classes)
+
+    elif network_config.name == 'resnet50':
+        net = ResNet50(num_classes=num_classes)
+
+    elif network_config.name == 'lenet':
+        net = LeNet(num_classes=num_classes, num_channel=3)
+
+    elif network_config.name == 'wrn':
+        net = WideResNet(depth=28,
widen_factor=10, + dropRate=0.0, + num_classes=num_classes) + + elif network_config.name == 'densenet': + net = DenseNet3(depth=100, + growth_rate=12, + reduction=0.5, + bottleneck=True, + dropRate=0.0, + num_classes=num_classes) + + elif network_config.name == 'patchcore_net': + # path = '/home/pengyunwang/.cache/torch/hub/vision-0.9.0' + # module = torch.hub._load_local(path, + # 'wide_resnet50_2', + # pretrained=True) + backbone = get_network(network_config.backbone) + net = PatchcoreNet(backbone) + elif network_config.name == 'wide_resnet_50_2': + module = torch.hub.load('pytorch/vision:v0.9.0', + 'wide_resnet50_2', + pretrained=True) + net = PatchcoreNet(module) + + elif network_config.name == 'godin_net': + backbone = get_network(network_config.backbone) + net = GodinNet(backbone=backbone, + feature_size=backbone.feature_size, + num_classes=num_classes, + similarity_measure=network_config.similarity_measure) + + elif network_config.name == 'react_net': + backbone = get_network(network_config.backbone) + net = ReactNet(backbone) + + elif network_config.name == 'csi_net': + backbone = get_network(network_config.backbone) + net = CSINet(backbone, + feature_size=backbone.feature_size, + num_classes=num_classes, + simclr_dim=network_config.simclr_dim, + shift_trans_type=network_config.shift_trans_type) + + elif network_config.name == 'draem': + model = ReconstructiveSubNetwork(in_channels=3, + out_channels=3, + base_width=int( + network_config.image_size / 2)) + model_seg = DiscriminativeSubNetwork( + in_channels=6, + out_channels=2, + base_channels=int(network_config.image_size / 4)) + + net = {'generative': model, 'discriminative': model_seg} + + elif network_config.name == 'openmax_network': + backbone = get_network(network_config.backbone) + net = OpenMax(backbone=backbone, num_classes=num_classes) + + elif network_config.name == 'mcd': + backbone = get_network(network_config.backbone) + net = MCDNet(backbone=backbone, num_classes=num_classes) + + elif network_config.name == 'udg': + backbone = get_network(network_config.backbone) + net = UDGNet(backbone=backbone, + num_classes=num_classes, + num_clusters=network_config.num_clusters) + + elif network_config.name == 'opengan': + from .opengan import Discriminator, Generator + backbone = get_network(network_config.backbone) + netG = Generator(in_channels=network_config.nz, + feature_size=network_config.ngf, + out_channels=network_config.nc) + netD = Discriminator(in_channels=network_config.nc, + feature_size=network_config.ndf) + + net = {'netG': netG, 'netD': netD, 'backbone': backbone} + + elif network_config.name == 'arpl_gan': + from .arpl_net import (resnet34ABN, Generator, Discriminator, + Generator32, Discriminator32, ARPLayer) + feature_net = resnet34ABN(num_classes=num_classes, num_bns=2) + dim_centers = feature_net.fc.weight.shape[1] + feature_net.fc = nn.Identity() + + criterion = ARPLayer(feat_dim=dim_centers, + num_classes=num_classes, + weight_pl=network_config.weight_pl, + temp=network_config.temp) + + assert network_config.image_size == 32 \ + or network_config.image_size == 64, \ + 'ARPL-GAN only supports 32x32 or 64x64 images!' 
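+
+        # a sketch of the fields arpl_gan reads off network_config (names
+        # inferred from this function, not an official schema):
+        #   name: 'arpl_gan', image_size: 32 or 64,
+        #   nz / ngf / nc: generator latent dim, width and output channels,
+        #   ndf: discriminator width, weight_pl / temp: ARPLayer parameters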
+ + if network_config.image_size == 64: + netG = Generator(1, network_config.nz, network_config.ngf, + network_config.nc) # ngpu, nz, ngf, nc + netD = Discriminator(1, network_config.nc, + network_config.ndf) # ngpu, nc, ndf + else: + netG = Generator32(1, network_config.nz, network_config.ngf, + network_config.nc) # ngpu, nz, ngf, nc + netD = Discriminator32(1, network_config.nc, + network_config.ndf) # ngpu, nc, ndf + + net = { + 'netF': feature_net, + 'criterion': criterion, + 'netG': netG, + 'netD': netD + } + + elif network_config.name == 'arpl_net': + from .arpl_net import ARPLayer + feature_net = get_network(network_config.feat_extract_network) + try: + dim_centers = feature_net.fc.weight.shape[1] + feature_net.fc = nn.Identity() + except Exception: + dim_centers = feature_net.classifier[0].weight.shape[1] + feature_net.classifier = nn.Identity() + + criterion = ARPLayer(feat_dim=dim_centers, + num_classes=num_classes, + weight_pl=network_config.weight_pl, + temp=network_config.temp) + + net = {'netF': feature_net, 'criterion': criterion} + + elif network_config.name == 'bit': + net = KNOWN_MODELS[network_config.model]( + head_size=network_config.num_logits, + zero_head=True, + num_block_open=network_config.num_block_open) + + elif network_config.name == 'vit': + cfg = mmcv.Config.fromfile(network_config.model) + net = init_model(cfg, network_config.checkpoint, 0) + net.get_fc = MethodType( + lambda self: (self.head.layers.head.weight.cpu().numpy(), + self.head.layers.head.bias.cpu().numpy()), net) + + elif network_config.name == 'conf_branch_net': + + backbone = get_network(network_config.backbone) + net = ConfBranchNet(backbone=backbone, num_classes=num_classes) + + elif network_config.name == 'dsvdd': + net = build_network(network_config.type) + + elif network_config.name == 'projectionNet': + backbone = get_network(network_config.backbone) + net = ProjectionNet(backbone=backbone, num_classes=2) + + elif network_config.name == 'dropout_net': + backbone = get_network(network_config.backbone) + net = DropoutNet(backbone=backbone, dropout_p=network_config.dropout_p) + + elif network_config.name == 'rd4ad_net': + encoder = get_network(network_config.backbone) + bn = BN_layer(AttnBasicBlock, 2) + decoder = De_ResNet18_256x256() + net = {'encoder': encoder, 'bn': bn, 'decoder': decoder} + else: + raise Exception('Unexpected Network Architecture!') + + if network_config.pretrained: + if type(net) is dict: + for subnet, checkpoint in zip(net.values(), + network_config.checkpoint): + if checkpoint is not None: + if checkpoint != 'none': + subnet.load_state_dict(torch.load(checkpoint), + strict=False) + elif network_config.name == 'bit' and not network_config.normal_load: + net.load_from(np.load(network_config.checkpoint)) + elif network_config.name == 'vit': + pass + else: + try: + net.load_state_dict(torch.load(network_config.checkpoint), + strict=False) + except RuntimeError: + # sometimes fc should not be loaded + loaded_pth = torch.load(network_config.checkpoint) + loaded_pth.pop('fc.weight') + loaded_pth.pop('fc.bias') + net.load_state_dict(loaded_pth, strict=False) + print('Model Loading {} Completed!'.format(network_config.name)) + if network_config.num_gpus > 1: + if type(net) is dict: + for key, subnet in zip(net.keys(), net.values()): + net[key] = torch.nn.parallel.DistributedDataParallel( + subnet, + device_ids=[comm.get_local_rank()], + broadcast_buffers=True) + else: + net = torch.nn.parallel.DistributedDataParallel( + net.cuda(), + device_ids=[comm.get_local_rank()], + 
broadcast_buffers=True) + + if network_config.num_gpus > 0: + if type(net) is dict: + for subnet in net.values(): + subnet.cuda() + else: + net.cuda() + torch.cuda.manual_seed(1) + np.random.seed(1) + cudnn.benchmark = True + return net diff --git a/OpenOOD/openood/networks/npos_net.py b/OpenOOD/openood/networks/npos_net.py new file mode 100644 index 0000000000000000000000000000000000000000..c3a81df124152b9df007ae55f06d3fdc965765ef --- /dev/null +++ b/OpenOOD/openood/networks/npos_net.py @@ -0,0 +1,41 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class NPOSNet(nn.Module): + def __init__(self, backbone, head, feat_dim, num_classes): + super(NPOSNet, self).__init__() + + self.backbone = backbone + if hasattr(self.backbone, 'fc'): + # remove fc otherwise ddp will + # report unused params + self.backbone.fc = nn.Identity() + + try: + feature_size = backbone.feature_size + except AttributeError: + feature_size = backbone.module.feature_size + + self.prototypes = nn.Parameter(torch.zeros(num_classes, feat_dim), + requires_grad=True) + self.mlp = nn.Sequential(nn.Linear(feature_size, feat_dim), + nn.ReLU(inplace=True), nn.Linear(feat_dim, 1)) + + if head == 'linear': + self.head = nn.Linear(feature_size, feat_dim) + elif head == 'mlp': + self.head = nn.Sequential(nn.Linear(feature_size, feature_size), + nn.ReLU(inplace=True), + nn.Linear(feature_size, feat_dim)) + + def forward(self, x): + feat = self.backbone(x).squeeze() + unnorm_features = self.head(feat) + features = F.normalize(unnorm_features, dim=1) + return features + + def intermediate_forward(self, x): + feat = self.backbone(x).squeeze() + return F.normalize(feat, dim=1) diff --git a/OpenOOD/openood/networks/opengan.py b/OpenOOD/openood/networks/opengan.py new file mode 100644 index 0000000000000000000000000000000000000000..db75825f0c7594119b20a5f7334d2e85c90d4883 --- /dev/null +++ b/OpenOOD/openood/networks/opengan.py @@ -0,0 +1,64 @@ +from torch import nn + + +class Generator(nn.Module): + def __init__(self, in_channels=100, feature_size=64, out_channels=512): + super(Generator, self).__init__() + self.nz = in_channels + self.ngf = feature_size + self.nc = out_channels + + self.main = nn.Sequential( + # input is Z, going into a convolution + # Conv2d(in_channels, + # out_channels, + # kernel_size, + # stride=1, + # padding=0, + # dilation=1, + # groups=1, + # bias=True, + # padding_mode='zeros') + nn.Conv2d(self.nz, self.ngf * 8, 1, 1, 0, bias=False), + nn.BatchNorm2d(self.ngf * 8), + nn.ReLU(True), + # state size. (self.ngf*8) x 4 x 4 + nn.Conv2d(self.ngf * 8, self.ngf * 4, 1, 1, 0, bias=False), + nn.BatchNorm2d(self.ngf * 4), + nn.ReLU(True), + # state size. (self.ngf*4) x 8 x 8 + nn.Conv2d(self.ngf * 4, self.ngf * 2, 1, 1, 0, bias=False), + nn.BatchNorm2d(self.ngf * 2), + nn.ReLU(True), + # state size. (self.ngf*2) x 16 x 16 + nn.Conv2d(self.ngf * 2, self.ngf * 4, 1, 1, 0, bias=False), + nn.BatchNorm2d(self.ngf * 4), + nn.ReLU(True), + # state size. (self.ngf) x 32 x 32 + nn.Conv2d(self.ngf * 4, self.nc, 1, 1, 0, bias=True), + # nn.Tanh() + # state size. 
(self.nc) x 64 x 64 + ) + + def forward(self, input): + return self.main(input) + + +class Discriminator(nn.Module): + def __init__(self, in_channels=512, feature_size=64): + super(Discriminator, self).__init__() + self.nc = in_channels + self.ndf = feature_size + self.main = nn.Sequential( + nn.Conv2d(self.nc, self.ndf * 8, 1, 1, 0, bias=False), + nn.LeakyReLU(0.2, inplace=True), + nn.Conv2d(self.ndf * 8, self.ndf * 4, 1, 1, 0, bias=False), + nn.BatchNorm2d(self.ndf * 4), nn.LeakyReLU(0.2, inplace=True), + nn.Conv2d(self.ndf * 4, self.ndf * 2, 1, 1, 0, bias=False), + nn.BatchNorm2d(self.ndf * 2), nn.LeakyReLU(0.2, inplace=True), + nn.Conv2d(self.ndf * 2, self.ndf, 1, 1, 0, bias=False), + nn.BatchNorm2d(self.ndf), nn.LeakyReLU(0.2, inplace=True), + nn.Conv2d(self.ndf, 1, 1, 1, 0, bias=False), nn.Sigmoid()) + + def forward(self, input): + return self.main(input) diff --git a/OpenOOD/openood/networks/openmax_net.py b/OpenOOD/openood/networks/openmax_net.py new file mode 100644 index 0000000000000000000000000000000000000000..a401b763db269e7a899243f16ca8513940b8e886 --- /dev/null +++ b/OpenOOD/openood/networks/openmax_net.py @@ -0,0 +1,54 @@ +import torch.nn as nn +import torch.nn.functional as F + + +class OpenMax(nn.Module): + def __init__(self, backbone, num_classes=50, embed_dim=None): + super(OpenMax, self).__init__() + self.backbone_name = backbone + self.backbone = backbone + + self.dim = self.get_backbone_last_layer_out_channel() + if embed_dim: + self.embeddingLayer = nn.Sequential( + nn.Linear(self.dim, embed_dim), + nn.PReLU(), + ) + self.dim = embed_dim + self.classifier = nn.Linear(self.dim, num_classes) + + def get_backbone_last_layer_out_channel(self): + if self.backbone_name == 'LeNetPlus': + return 128 * 3 * 3 + last_layer = list(self.backbone.children())[-1] + while (not isinstance(last_layer, nn.Conv2d)) and \ + (not isinstance(last_layer, nn.Linear)) and \ + (not isinstance(last_layer, nn.BatchNorm2d)): + + temp_layer = list(last_layer.children())[-1] + if isinstance(temp_layer, nn.Sequential) and len( + list(temp_layer.children())) == 0: + temp_layer = list(last_layer.children())[-2] + last_layer = temp_layer + if isinstance(last_layer, nn.BatchNorm2d): + return last_layer.num_features + elif isinstance(last_layer, nn.Linear): + return last_layer.out_features + else: + return last_layer.out_channels + + def forward(self, x): + feature = self.backbone(x) + if feature.dim() == 4: + feature = F.adaptive_avg_pool2d(feature, 1) + feature = feature.view(x.size(0), -1) + # if includes embedding layer. 
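+        # (embeddingLayer only exists when embed_dim was passed to __init__,
+        # e.g. OpenMax(backbone, num_classes=50, embed_dim=128), values here
+        # hypothetical; the hasattr check keeps forward() valid either way)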
+ feature = self.embeddingLayer(feature) if hasattr( + self, 'embeddingLayer') else feature + logits = self.classifier(feature) + + return logits + + def get_fc(self): + fc = self.classifier + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() diff --git a/OpenOOD/openood/networks/p2pnet/Resnet.py b/OpenOOD/openood/networks/p2pnet/Resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..b807617e7364337ebc2cf8da68d5b27b1b075ac3 --- /dev/null +++ b/OpenOOD/openood/networks/p2pnet/Resnet.py @@ -0,0 +1,288 @@ +import torch +import torch.nn as nn +from torch.utils.model_zoo import load_url as load_state_dict_from_url + + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} +model_paths ={ + 'resnet18': '../Data/pretrainedmodel/resnet18.pth', + 'resnet34': '../Data/pretrainedmodel/resnet34.pth', + 'resnet50': '../Data/pretrainedmodel/resnet50.pth', + 'resnet101': '../Data/pretrainedmodel/resnet101.pth', + 'resnet152': '../Data/pretrainedmodel/resnet152.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + 
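+        # the trailing 1x1 conv below restores the channel count to
+        # planes * expansion (4x), closing the 1x1 -> 3x3 -> 1x1 bottleneck;
+        # `width` folds base_width and groups in for the ResNeXt variants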
self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x1 = self.maxpool(x) + + x2 = self.layer1(x1) + x3 = self.layer2(x2) + x4 = self.layer3(x3) + x5 = self.layer4(x4) + + x = self.avgpool(x5) + x = x.reshape(x.size(0), -1) + x = self.fc(x) + + return x1, x2, x3, x4, x5 + + +def _resnet(arch, inplanes, planes, pretrained, progress, **kwargs): + model = ResNet(inplanes, planes, **kwargs) + if pretrained: + # load from url + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + # load from local file + # model.load_state_dict(torch.load(model_paths[arch])) + return model + + +def resnet18(pretrained=False, progress=True, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, + **kwargs) + + +def resnet34(pretrained=False, progress=True, **kwargs): + """Constructs a ResNet-34 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet50(pretrained=False, progress=True, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet101(pretrained=False, progress=True, **kwargs): + """Constructs a ResNet-101 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, + **kwargs) + + +def resnet152(pretrained=False, progress=True, **kwargs): + """Constructs a ResNet-152 model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, + **kwargs) + + +def resnext50_32x4d(**kwargs): + kwargs['groups'] = 32 + kwargs['width_per_group'] = 4 + return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], + pretrained=False, progress=True, **kwargs) + + +def resnext101_32x8d(**kwargs): + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], + pretrained=False, progress=True, **kwargs) diff --git a/OpenOOD/openood/networks/p2pnet/__init__.py b/OpenOOD/openood/networks/p2pnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/Resnet.cpython-311.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/Resnet.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29d97aede76d53fd193dba8c33616bb5e9f18a2d Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/Resnet.cpython-311.pyc differ diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/Resnet.cpython-37.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/Resnet.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..065435c85b9d8bf877458cc25afdbf2678eb39b2 Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/Resnet.cpython-37.pyc differ diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/__init__.cpython-311.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83fd668339ed01c7bba23e987001beea7e129324 Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/__init__.cpython-311.pyc differ diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/__init__.cpython-37.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be58930a549cf29a669a92e1e4206dfee3b977af Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/__init__.cpython-37.pyc differ diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/anchors.cpython-311.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/anchors.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3ab32430d7280cafd98ded0d8403e2f30007490 Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/anchors.cpython-311.pyc differ diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/anchors.cpython-37.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/anchors.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31c2487ff18a2b1caf5cd885a50e50eee4476166 Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/anchors.cpython-37.pyc differ diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/clustering.cpython-311.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/clustering.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a577087a67cb030aa1a74d0cf58b7cae4377ecdb Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/clustering.cpython-311.pyc differ diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/clustering.cpython-37.pyc 
b/OpenOOD/openood/networks/p2pnet/__pycache__/clustering.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09803ea0450bcf162d91ad70c831658f099863c6
Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/clustering.cpython-37.pyc differ
diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/model.cpython-311.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/model.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72e676310e61c17c56010e59bb30029bc00894e1
Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/model.cpython-311.pyc differ
diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/model.cpython-37.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/model.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8ce2da71b522bffb24d1f837e4b595473ab1db7
Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/model.cpython-37.pyc differ
diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/utils.cpython-311.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/utils.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcd2c5ac37b09ddae75001f97189870814468cc7
Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/utils.cpython-311.pyc differ
diff --git a/OpenOOD/openood/networks/p2pnet/__pycache__/utils.cpython-37.pyc b/OpenOOD/openood/networks/p2pnet/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec6da008741c01dd8f291e9230fcdb18dbe9766b
Binary files /dev/null and b/OpenOOD/openood/networks/p2pnet/__pycache__/utils.cpython-37.pyc differ
diff --git a/OpenOOD/openood/networks/p2pnet/anchors.py b/OpenOOD/openood/networks/p2pnet/anchors.py
new file mode 100644
index 0000000000000000000000000000000000000000..bded25f56cc17c559204f9ef70cd0312bffdd6a1
--- /dev/null
+++ b/OpenOOD/openood/networks/p2pnet/anchors.py
@@ -0,0 +1,104 @@
+import numpy as np
+
+_default_anchors_setting = (
+    dict(layer='p3', stride=32, size=48, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
+    dict(layer='p4', stride=64, size=96, scale=[2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
+    dict(layer='p5', stride=128, size=192, scale=[1, 2 ** (1. / 3.), 2 ** (2. / 3.)], aspect_ratio=[0.667, 1, 1.5]),
+)
+# scales = [1, 1.26, 1.59], control the area
+# aspect_ratio = w / h = [0.667, 1, 1.5], control the ratio of w and h
+
+
+def generate_default_anchor_maps(anchors_setting=None, input_shape=(448, 448)):
+    """
+    generate default anchors
+
+    :param anchors_setting: all information about the anchors
+    :param input_shape: shape of input images, e.g.
(h, w) + :return: center_anchors: # anchors * 4 (oy, ox, h, w) + edge_anchors: # anchors * 4 (y0, x0, y1, x1) + anchor_area: # anchors * 1 (area) + """ + if anchors_setting is None: + anchors_setting = _default_anchors_setting + + center_anchors = np.zeros((0, 4), dtype=np.float32) + edge_anchors = np.zeros((0, 4), dtype=np.float32) + anchor_areas = np.zeros((0,), dtype=np.float32) + input_shape = np.array(input_shape, dtype=int) + + anchor_paras = np.zeros((0, 3), dtype=np.float32) + + for anchor_info in anchors_setting: + + stride = anchor_info['stride'] + size = anchor_info['size'] + scales = anchor_info['scale'] + aspect_ratios = anchor_info['aspect_ratio'] + + output_map_shape = np.ceil(input_shape.astype(np.float32) / stride) + output_map_shape = output_map_shape.astype(np.int32) + output_shape = tuple(output_map_shape) + (4,) + ostart = stride / 2. + oy = np.arange(ostart, ostart + stride * output_shape[0], stride) + oy = oy.reshape(output_shape[0], 1) + ox = np.arange(ostart, ostart + stride * output_shape[1], stride) + ox = ox.reshape(1, output_shape[1]) + center_anchor_map_template = np.zeros(output_shape, dtype=np.float32) + center_anchor_map_template[:, :, 0] = oy + center_anchor_map_template[:, :, 1] = ox + + for scale in scales: + for aspect_ratio in aspect_ratios: + center_anchor_map = center_anchor_map_template.copy() + center_anchor_map[:, :, 2] = size * scale / float(aspect_ratio) ** 0.5 # h + center_anchor_map[:, :, 3] = size * scale * float(aspect_ratio) ** 0.5 # w + + edge_anchor_map = np.concatenate((center_anchor_map[..., :2] - center_anchor_map[..., 2:4] / 2., + center_anchor_map[..., :2] + center_anchor_map[..., 2:4] / 2.), + axis=-1) + anchor_area_map = center_anchor_map[..., 2] * center_anchor_map[..., 3] + center_anchors = np.concatenate((center_anchors, center_anchor_map.reshape(-1, 4))) + edge_anchors = np.concatenate((edge_anchors, edge_anchor_map.reshape(-1, 4))) + anchor_areas = np.concatenate((anchor_areas, anchor_area_map.reshape(-1))) + + return center_anchors, edge_anchors, anchor_areas + + +def hard_nms(cdds, topn=10, iou_thresh=0.25): + if not (type(cdds).__module__ == 'numpy' and len(cdds.shape) == 2 and cdds.shape[1] >= 5): + raise TypeError('edge_box_map should be N * 5+ ndarray') + + cdds = cdds.copy() + indices = np.argsort(cdds[:, 0]) + cdds = cdds[indices] + cdd_results = [] + + res = cdds + + while res.any(): + cdd = res[-1] + cdd_results.append(cdd) + if len(cdd_results) == topn: + return np.array(cdd_results) + res = res[:-1] + + start_max = np.maximum(res[:, 1:3], cdd[1:3]) + end_min = np.minimum(res[:, 3:5], cdd[3:5]) + lengths = end_min - start_max + intersec_map = lengths[:, 0] * lengths[:, 1] + intersec_map[np.logical_or(lengths[:, 0] < 0, lengths[:, 1] < 0)] = 0 + iou_map_cur = intersec_map / ((res[:, 3] - res[:, 1]) * (res[:, 4] - res[:, 2]) + (cdd[3] - cdd[1]) * ( + cdd[4] - cdd[2]) - intersec_map) + res = res[iou_map_cur < iou_thresh] + + return np.array(cdd_results) + + +if __name__ == '__main__': + a = hard_nms(np.array([ + [0.4, 1, 10, 12, 20], + [0.5, 1, 11, 11, 20], + [0.55, 20, 30, 40, 50] + ]), topn=100, iou_thresh=0.4) + print(a) diff --git a/OpenOOD/openood/networks/p2pnet/clustering.py b/OpenOOD/openood/networks/p2pnet/clustering.py new file mode 100644 index 0000000000000000000000000000000000000000..c4133b592eca7b51d499571b20bce74a45925272 --- /dev/null +++ b/OpenOOD/openood/networks/p2pnet/clustering.py @@ -0,0 +1,70 @@ +import numpy as np +import itertools + +class PartsResort(): + def __init__(self, num_center, 
feature_dim):
+        super(PartsResort, self).__init__()
+        self.num_center = num_center
+        self.feature_dim = feature_dim
+
+        self.centers = np.zeros([num_center, feature_dim])
+        self.count = 0
+
+        self.permutations = list(itertools.permutations(range(num_center)))
+
+    def update(self, points, order):
+        batch = points.shape[0]
+
+        # [batch, topN, feature_dim]
+        resorted_points = np.zeros_like(points)
+        for i in range(batch):
+            resorted_points[i] = points[i][order[i], :]
+
+        # [topN, feature_dim]
+        resorted_points = np.mean(resorted_points, axis=0)
+        for i in range(self.num_center):
+            self.centers[i] = (self.centers[i]*self.count*0.9 + resorted_points[i]*batch) / (self.count*0.9 + batch)
+        self.count += batch
+
+    def classify(self, points, is_train):
+        # input: points [batch, topN, feature_dim]
+        # output: [batch, topN]
+        batch, topN, _ = points.shape
+        if np.sum(self.count) == 0:
+            order = np.stack([list(range(topN))]*batch, axis=0)
+            # self.update(points, order)
+        else:
+            order = np.zeros([batch, topN], dtype=np.int32)
+            for i in range(points.shape[0]):
+                topn_points = points[i]
+                order[i] = self.graph_assign(topn_points)
+        if is_train:
+            self.update(points, order)
+        return order
+
+    def graph_assign(self, topn_points):
+        adj_matrix_center = np.dot(self.centers, self.centers.transpose())
+        adj_matrix = np.dot(topn_points, topn_points.transpose())
+        adj_matrix_center = adj_matrix_center / adj_matrix_center.max()
+        adj_matrix = adj_matrix / adj_matrix.max()
+
+        max_similarity = 0
+        order = list(range(self.num_center))
+        for perm in self.permutations:
+            # permute a copy of the original adjacency matrix; reassigning
+            # adj_matrix itself would compound permutations across iterations
+            permuted = adj_matrix[:, perm][perm, :]
+            prod = np.sum(adj_matrix_center * permuted)
+            if prod > max_similarity:
+                max_similarity = prod
+                order = list(perm)
+            # print(max_similarity, prod, order)
+        return order
+
+
+if __name__ == "__main__":
+    PC = PartsResort(6, 105)
+
+    # classify() requires the is_train flag
+    points = np.random.randint(0, 10, size=[2, 6, 105])
+    PC.classify(points, is_train=True)
+    points = np.random.randint(0, 10, size=[2, 6, 105])
+    PC.classify(points, is_train=True)
diff --git a/OpenOOD/openood/networks/p2pnet/model.py b/OpenOOD/openood/networks/p2pnet/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..444d8524c8d8723d71abcdfd1f9d9cc06db7adf3
--- /dev/null
+++ b/OpenOOD/openood/networks/p2pnet/model.py
@@ -0,0 +1,223 @@
+import torch.nn as nn
+import torch
+import numpy as np
+import torch.nn.functional as F
+from anchors import generate_default_anchor_maps, hard_nms
+from clustering import PartsResort
+
+
+class PMG(nn.Module):
+    def __init__(self, model, feature_size, num_ftrs, classes_num, topn):
+        super(PMG, self).__init__()
+
+        self.backbone = model
+        self.num_ftrs = num_ftrs
+        self.topn = topn
+        self.im_sz = 448
+        self.pad_side = 224
+        self.PR = PartsResort(self.topn, self.num_ftrs//2)
+
+        self.proposal_net = ProposalNet(self.num_ftrs)
+        _, edge_anchors, _ = generate_default_anchor_maps()
+        self.edge_anchors = (edge_anchors+self.pad_side).astype(np.int32)
+
+        # mlp for regularization
+        self.reg_mlp1 = nn.Sequential(
+            nn.Linear(self.num_ftrs//2 * self.topn, self.num_ftrs//2),
+            nn.ELU(inplace=True),
+            nn.Linear(self.num_ftrs//2, self.num_ftrs//2)
+        )
+        self.reg_mlp2 = nn.Sequential(
+            nn.Linear(self.num_ftrs//2 * self.topn, self.num_ftrs//2),
+            nn.ELU(inplace=True),
+            nn.Linear(self.num_ftrs//2, self.num_ftrs//2)
+        )
+        self.reg_mlp3 = nn.Sequential(
+            nn.Linear(self.num_ftrs//2 * self.topn, self.num_ftrs//2),
+            nn.ELU(inplace=True),
+            nn.Linear(self.num_ftrs//2, self.num_ftrs//2)
+        )
+
+        # stage 1
+        self.conv_block1 = nn.Sequential( +
BasicConv(self.num_ftrs//4, feature_size, kernel_size=1, stride=1, padding=0, relu=True), + BasicConv(feature_size, self.num_ftrs//2, kernel_size=3, stride=1, padding=1, relu=True), + nn.AdaptiveMaxPool2d(1) + ) + self.classifier1 = nn.Sequential( + nn.BatchNorm1d(self.num_ftrs//2), + nn.Linear(self.num_ftrs//2, feature_size), + nn.BatchNorm1d(feature_size), + nn.ELU(inplace=True), + nn.Dropout(0.5), + nn.Linear(feature_size, classes_num), + ) + + # stage 2 + self.conv_block2 = nn.Sequential( + BasicConv(self.num_ftrs//2, feature_size, kernel_size=1, stride=1, padding=0, relu=True), + BasicConv(feature_size, self.num_ftrs//2, kernel_size=3, stride=1, padding=1, relu=True), + nn.AdaptiveMaxPool2d(1) + ) + self.classifier2 = nn.Sequential( + nn.BatchNorm1d(self.num_ftrs//2), + nn.Linear(self.num_ftrs//2, feature_size), + nn.BatchNorm1d(feature_size), + nn.ELU(inplace=True), + nn.Dropout(0.5), + nn.Linear(feature_size, classes_num), + ) + + # stage 3 + self.conv_block3 = nn.Sequential( + BasicConv(self.num_ftrs, feature_size, kernel_size=1, stride=1, padding=0, relu=True), + BasicConv(feature_size, self.num_ftrs//2, kernel_size=3, stride=1, padding=1, relu=True), + nn.AdaptiveMaxPool2d(1) + ) + self.classifier3 = nn.Sequential( + nn.BatchNorm1d(self.num_ftrs//2), + nn.Linear(self.num_ftrs//2, feature_size), + nn.BatchNorm1d(feature_size), + nn.ELU(inplace=True), + nn.Dropout(0.5), + nn.Linear(feature_size, classes_num), + ) + + # concat features from different stages + self.classifier_concat = nn.Sequential( + nn.BatchNorm1d(self.num_ftrs//2 * 3), + nn.Linear(self.num_ftrs//2 * 3, feature_size), + nn.BatchNorm1d(feature_size), + nn.ELU(inplace=True), + nn.Linear(feature_size, classes_num), + ) + + self.pooling = nn.AdaptiveAvgPool2d(1) + + + + def forward(self, x, is_train=False, return_feature=False, return_feature_list=False): + out1, out2, f1, f2, f3 = self.backbone(x) + out3 = f1.clone().detach() + out4 = f2.clone().detach() + out5 = f3.clone().detach() + out5 = self.pooling(out5) + feature_list = [out1, out2, out3, out4, out5] + + + hook_output = [] + def feature_hook(module, input, output): + hook_output.append(output) + bn_hook = self.classifier_concat[2].register_forward_hook(feature_hook) + + batch = x.shape[0] + rpn_score = self.proposal_net(f3.detach()) + all_cdds = [np.concatenate((x.reshape(-1, 1), + self.edge_anchors.copy(), + np.arange(0, len(x)).reshape(-1, 1)), + axis=1) for x in rpn_score.data.cpu().numpy()] + top_n_cdds = np.array([hard_nms(x, self.topn, iou_thresh=0.25) for x in all_cdds]) + top_n_index = top_n_cdds[:, :, -1].astype(np.int32) + top_n_index = torch.from_numpy(top_n_index).long().to(x.device) + top_n_prob = torch.gather(rpn_score, dim=1, index=top_n_index) + + # re-input salient parts + part_imgs = torch.zeros([batch, self.topn, 3, 224, 224]).to(x.device) + x_pad = F.pad(x, (self.pad_side, self.pad_side, self.pad_side, self.pad_side), mode='constant', value=0) + for i in range(batch): + for j in range(self.topn): + [y0, x0, y1, x1] = top_n_cdds[i, j, 1:5].astype(np.int32) + part_imgs[i:i + 1, j] = F.interpolate(x_pad[i:i + 1, :, y0:y1, x0:x1], + size=(224, 224), mode='bilinear', + align_corners=True) + + part_imgs = part_imgs.view(batch*self.topn, 3, 224, 224) + _, _, f1_part, f2_part, f3_part = self.backbone(part_imgs.detach()) + f1_part = self.conv_block1(f1_part).view(batch*self.topn, -1) + f2_part = self.conv_block2(f2_part).view(batch*self.topn, -1) + f3_part = self.conv_block3(f3_part).view(batch*self.topn, -1) + yp1 = self.classifier1(f1_part) + yp2 = 
self.classifier2(f2_part) + yp3 = self.classifier3(f3_part) + yp4 = self.classifier_concat(torch.cat((f1_part, f2_part, f3_part), -1)) + + + # resort parts + feature_points = f3_part.view(batch, self.topn, -1) + parts_order = self.PR.classify(feature_points.data.cpu().numpy(), is_train) + parts_order = torch.from_numpy(parts_order).long().to(x.device) + parts_order = parts_order.unsqueeze(2).expand(batch, self.topn, self.num_ftrs//2) + + f1_points = torch.gather(f1_part.view(batch, self.topn, -1), dim=1, index=parts_order) + f1_m = self.reg_mlp1(f1_points.view(batch, -1)) + f2_points = torch.gather(f2_part.view(batch, self.topn, -1), dim=1, index=parts_order) + f2_m = self.reg_mlp2(f2_points.view(batch, -1)) + f3_points = torch.gather(f3_part.view(batch, self.topn, -1), dim=1, index=parts_order) + f3_m = self.reg_mlp3(f3_points.view(batch, -1)) + + # stage-wise classification + f1 = self.conv_block1(f1).view(batch, -1) + f2 = self.conv_block2(f2).view(batch, -1) + f3 = self.conv_block3(f3).view(batch, -1) + y1 = self.classifier1(f1) + y2 = self.classifier2(f2) + y3 = self.classifier3(f3) + y4 = self.classifier_concat(torch.cat((f1, f2, f3), -1)) + + bn_hook.remove() + if return_feature: + # bn_hook.remove() + + return y1+y2+y3+y4, hook_output[1] + elif return_feature_list: + return y1+y2+y3+y4, feature_list + + return y1+y2+y3+y4 + + # return y1, y2, y3, y4, yp1, yp2, yp3, yp4, top_n_prob, f1_m, f1, f2_m, f2, f3_m, f3 + def get_fc(self): + fc = self.classifier_concat[-1] + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + def get_fc_layer(self): + return self.classifier_concat[-1] + +class BasicConv(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False): + super(BasicConv, self).__init__() + self.out_channels = out_planes + self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, + stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) + self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, + momentum=0.01, affine=True) if bn else None + self.relu = nn.ReLU() if relu else None + + def forward(self, x): + x = self.conv(x) + if self.bn is not None: + x = self.bn(x) + if self.relu is not None: + x = self.relu(x) + return x + + +class ProposalNet(nn.Module): + def __init__(self, depth): + super(ProposalNet, self).__init__() + self.down1 = nn.Conv2d(depth, 128, 3, 1, 1) + self.down2 = nn.Conv2d(128, 128, 3, 2, 1) + self.down3 = nn.Conv2d(128, 128, 3, 2, 1) + self.ReLU = nn.ReLU() + self.tidy1 = nn.Conv2d(128, 6, 1, 1, 0) + self.tidy2 = nn.Conv2d(128, 6, 1, 1, 0) + self.tidy3 = nn.Conv2d(128, 9, 1, 1, 0) + # proposals: 14x14x6, 7x7x6, 4x4x9 + + def forward(self, x): + batch_size = x.size(0) + d1 = self.ReLU(self.down1(x)) + d2 = self.ReLU(self.down2(d1)) + d3 = self.ReLU(self.down3(d2)) + t1 = self.tidy1(d1).view(batch_size, -1) + t2 = self.tidy2(d2).view(batch_size, -1) + t3 = self.tidy3(d3).view(batch_size, -1) + return torch.cat((t1, t2, t3), dim=1) diff --git a/OpenOOD/openood/networks/p2pnet/utils.py b/OpenOOD/openood/networks/p2pnet/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..465bb6ebab3ce1ca209e2c1fd201dec2de408129 --- /dev/null +++ b/OpenOOD/openood/networks/p2pnet/utils.py @@ -0,0 +1,135 @@ +import numpy as np +import random +import torch +import torchvision +from torch.autograd import Variable +from torchvision import transforms, models +import torch.nn.functional as F + +import sys 
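+
+# the absolute path below pins one user's checkout so that the bare
+# `from model import *` / `from Resnet import *` imports resolve; a more
+# portable sketch (assuming this file stays inside the p2pnet package):
+#   import os
+#   sys.path.append(os.path.dirname(os.path.abspath(__file__)))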
+sys.path.append('/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/openood/networks/p2pnet')
+
+from model import *
+from Resnet import *
+# from dataset import *
+
+
+def cosine_anneal_schedule(t, nb_epoch, lr):
+    cos_inner = np.pi * (t % (nb_epoch))  # t - 1 is used when t has 1-based indexing.
+    cos_inner /= (nb_epoch)
+    cos_out = np.cos(cos_inner) + 1
+
+    return float(lr / 2 * cos_out)
+
+
+def load_model(backbone, pretrain=True, require_grad=True, classes_num=200, topn=4):
+    print('==> Building model..')
+    feature_size = 512
+    if backbone == 'resnet50':
+        num_ftrs = 2048
+        net = resnet50(pretrained=pretrain)
+        for param in net.parameters():
+            param.requires_grad = require_grad
+        net = PMG(net, feature_size, num_ftrs, classes_num, topn=topn)
+    elif backbone == 'resnet101':
+        num_ftrs = 2048
+        net = resnet101(pretrained=pretrain)
+        for param in net.parameters():
+            param.requires_grad = require_grad
+        # PMG has no default for topn, so it must be passed here as well
+        net = PMG(net, feature_size, num_ftrs, classes_num, topn=topn)
+    elif backbone == 'resnet34':
+        num_ftrs = 512
+        net = resnet34(pretrained=pretrain)
+        for param in net.parameters():
+            param.requires_grad = require_grad
+        net = PMG(net, feature_size, num_ftrs, classes_num, topn=topn)
+
+    return net
+
+
+def model_info(model):  # Plots a line-by-line description of a PyTorch model
+    n_p = sum(x.numel() for x in model.parameters())  # number parameters
+    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
+    print('\n%5s %50s %9s %12s %20s %12s %12s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+    for i, (name, p) in enumerate(model.named_parameters()):
+        name = name.replace('module_list.', '')
+        print('%5g %50s %9s %12g %20s %12.3g %12.3g' % (
+            i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
+    print('Model Summary: %g layers, %g parameters, %g gradients\n' % (i + 1, n_p, n_g))
+
+
+def test(net, testset, batch_size):
+    # NB: this expects the full 15-value return of PMG.forward that is
+    # commented out in model.py, not the summed logits it currently returns
+
+    device = torch.device('cuda')
+    num_workers = 16 if torch.cuda.is_available() else 0
+    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size//2, shuffle=False, num_workers=num_workers, drop_last=False)
+
+    net.eval()
+    num_correct = [0] * 5
+    for _, (inputs, targets) in enumerate(testloader):
+        if torch.cuda.is_available():
+            inputs, targets = inputs.to(device), targets.to(device)
+        y1, y2, y3, y4, _, _, _, _, _, _, _, _, _, _, _ = net(inputs, is_train=False)
+
+        _, p1 = torch.max(y1.data, 1)
+        _, p2 = torch.max(y2.data, 1)
+        _, p3 = torch.max(y3.data, 1)
+        _, p4 = torch.max(y4.data, 1)
+        _, p5 = torch.max((y1 + y2 + y3 + y4).data, 1)
+
+        num_correct[0] += p1.eq(targets.data).cpu().sum()
+        num_correct[1] += p2.eq(targets.data).cpu().sum()
+        num_correct[2] += p3.eq(targets.data).cpu().sum()
+        num_correct[3] += p4.eq(targets.data).cpu().sum()
+        num_correct[4] += p5.eq(targets.data).cpu().sum()
+
+    total = len(testset)
+    acc1 = 100. * float(num_correct[0]) / total
+    acc2 = 100. * float(num_correct[1]) / total
+    acc3 = 100. * float(num_correct[2]) / total
+    acc4 = 100. * float(num_correct[3]) / total
+    acc_test = 100.
* float(num_correct[4]) / total + + return acc1, acc2, acc3, acc4, acc_test + + +def list_loss(logits, targets): + temp = F.log_softmax(logits, -1) + loss = [-temp[i][targets[i].item()] for i in range(logits.size(0))] + return torch.stack(loss) + + +def ranking_loss(score, targets): + if torch.cuda.is_available(): + loss = Variable(torch.zeros(1).cuda()) + else: + loss = Variable(torch.zeros(1)) + batch_size = score.size(0) + + if torch.cuda.is_available(): + data_type = torch.cuda.FloatTensor + else: + data_type = torch.FloatTensor + for i in range(targets.shape[1]): + targets_p = (targets > targets[:, i].unsqueeze(1)).type(data_type) + pivot = score[:, i].unsqueeze(1) + loss_p = (1 - pivot + score) * targets_p + loss_p = torch.sum(F.relu(loss_p)) + loss += loss_p + return loss / batch_size + + +def smooth_CE(logits, label, peak): + # logits - [batch, num_cls] + # label - [batch] + batch, num_cls = logits.shape + label_logits = np.zeros(logits.shape, dtype=np.float32) + (1-peak)/(num_cls-1) + ind = ([i for i in range(batch)], list(label.data.cpu().numpy())) + label_logits[ind] = peak + smooth_label = torch.from_numpy(label_logits).to(logits.device) + + logits = F.log_softmax(logits, -1) + ce = torch.mul(logits, smooth_label) + loss = torch.mean(-torch.sum(ce, -1)) # batch average + + return loss diff --git a/OpenOOD/openood/networks/patchcore_net.py b/OpenOOD/openood/networks/patchcore_net.py new file mode 100644 index 0000000000000000000000000000000000000000..7201b6be41ad550f2577f6c35f3ed9ddd9f98a06 --- /dev/null +++ b/OpenOOD/openood/networks/patchcore_net.py @@ -0,0 +1,43 @@ +import torch +import torch.nn as nn + + +class PatchcoreNet(nn.Module): + def __init__(self, backbone): + super(PatchcoreNet, self).__init__() + + # def hook_t(module, input, output): + # self.features.append(output) + + # path = '/home/pengyunwang/.cache/torch/hub/vision-0.9.0' + # module = torch.hub._load_local(path, + # 'wide_resnet50_2', + # pretrained=True) + # self.module = module + # self.module.layer2[-1].register_forward_hook(hook_t) + # self.module.layer3[-1].register_forward_hook(hook_t) + + self.backbone = backbone + + for param in self.parameters(): + param.requires_grad = False + # self.module.cuda() + backbone.cuda() + self.criterion = torch.nn.MSELoss(reduction='sum') + + def forward(self, x, return_feature): + _, feature_list = self.backbone(x, return_feature_list=True) + return [feature_list[-3], feature_list[-2]] + + # def init_features(self): + # self.features = [] + + # def forward(self, x_t, return_feature): + # x_t = x_t.cuda() + # self.init_features() + # _ = self.module(x_t) + + # import pdb + # pdb.set_trace() + + # return self.features diff --git a/OpenOOD/openood/networks/projection_net.py b/OpenOOD/openood/networks/projection_net.py new file mode 100644 index 0000000000000000000000000000000000000000..43dcdd7cb990b2d6e907b1053fd5d1fc569ff0b2 --- /dev/null +++ b/OpenOOD/openood/networks/projection_net.py @@ -0,0 +1,39 @@ +import torch.nn as nn +from torchvision.models import resnet18 + + +class ProjectionNet(nn.Module): + def __init__(self, + backbone, + head_layers=[512, 512, 512, 512, 512, 512, 512, 512, 128], + num_classes=2): + super(ProjectionNet, self).__init__() + self.backbone = backbone + + # use res18 pretrained model if none is given + # self.backbone=resnet18(pretrained=True) + + # penultimate layer feature size + last_layer = backbone.feature_size + sequential_layers = [] + for num_neurons in head_layers: + sequential_layers.append(nn.Linear(last_layer, num_neurons)) + 
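+            # each hidden head block is Linear -> BatchNorm1d -> ReLU; the
+            # actual classification output self.out is added separately below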
sequential_layers.append(nn.BatchNorm1d(num_neurons)) + sequential_layers.append(nn.ReLU(inplace=True)) + last_layer = num_neurons + + # the last layer without activation + head = nn.Sequential(*sequential_layers) + self.head = head + self.out = nn.Linear(last_layer, num_classes) + + def forward(self, x): + # penultimate layer feature + _, embeds = self.backbone(x, return_feature=True) + tmp = self.head(embeds) + logits = self.out(tmp) + return embeds, logits + + def get_fc(self): + fc = self.out + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() diff --git a/OpenOOD/openood/networks/react_net.py b/OpenOOD/openood/networks/react_net.py new file mode 100644 index 0000000000000000000000000000000000000000..1f1ab435c65ed298aa8023a8b3903cd43589ae85 --- /dev/null +++ b/OpenOOD/openood/networks/react_net.py @@ -0,0 +1,24 @@ +import torch.nn as nn + + +class ReactNet(nn.Module): + def __init__(self, backbone): + super(ReactNet, self).__init__() + self.backbone = backbone + + def forward(self, x, return_feature=False, return_feature_list=False): + try: + return self.backbone(x, return_feature=return_feature, return_feature_list=return_feature_list) + except TypeError: + return self.backbone(x, return_feature=return_feature) + + def forward_threshold(self, x, threshold): + _, feature = self.backbone(x, return_feature=True) + feature = feature.clip(max=threshold) + feature = feature.view(feature.size(0), -1) + logits_cls = self.backbone.get_fc_layer()(feature) + return logits_cls + + def get_fc(self): + fc = self.backbone.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() diff --git a/OpenOOD/openood/networks/regnet_y_16gf.py b/OpenOOD/openood/networks/regnet_y_16gf.py new file mode 100644 index 0000000000000000000000000000000000000000..2c8d7f7917dea4fab694a29ab46ec7be06fbfe11 --- /dev/null +++ b/OpenOOD/openood/networks/regnet_y_16gf.py @@ -0,0 +1,26 @@ +import torch.nn as nn +from torchvision.models.regnet import RegNet, BlockParams +from functools import partial + +class RegNet_Y_16GF(RegNet): + def __init__(self): + block_params = BlockParams.from_init_params( + depth=18, w_0=200, w_a=106.23, w_m=2.48, group_width=112, se_ratio=0.25 + ) + norm_layer = partial(nn.BatchNorm2d, eps=1e-05, momentum=0.1) + super(RegNet_Y_16GF, self).__init__(block_params=block_params, norm_layer=norm_layer) + + + def forward(self, x, return_feature=False): + x = self.stem(x) + x = self.trunk_output(x) + + x = self.avgpool(x) + feas = x.flatten(start_dim=1) + logits = self.fc(feas) + + if return_feature: + return logits, feas + else: + return logits + diff --git a/OpenOOD/openood/networks/resnet18_224x224.py b/OpenOOD/openood/networks/resnet18_224x224.py new file mode 100644 index 0000000000000000000000000000000000000000..dc93006773093a25af6d2182ba933555e1abe4de --- /dev/null +++ b/OpenOOD/openood/networks/resnet18_224x224.py @@ -0,0 +1,74 @@ +from torchvision.models.resnet import BasicBlock, ResNet + + +class ResNet18_224x224(ResNet): + def __init__(self, + block=BasicBlock, + layers=[2, 2, 2, 2], + num_classes=1000): + super(ResNet18_224x224, self).__init__(block=block, + layers=layers, + num_classes=num_classes) + self.feature_size = 512 + + def forward(self, x, return_feature=False, return_feature_list=False): + feature1 = self.relu(self.bn1(self.conv1(x))) + feature1 = self.maxpool(feature1) + feature2 = self.layer1(feature1) + feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + 
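+        # flatten the pooled 512-d activations; this is the `feature` handed
+        # back when return_feature=True (consumed by wrappers such as ReactNet)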
feature = feature5.view(feature5.size(0), -1) + logits_cls = self.fc(feature) + + feature_list = [feature1, feature2, feature3, feature4, feature5] + if return_feature: + return logits_cls, feature + elif return_feature_list: + return logits_cls, feature_list + else: + return logits_cls + + def forward_threshold(self, x, threshold): + feature1 = self.relu(self.bn1(self.conv1(x))) + feature1 = self.maxpool(feature1) + feature2 = self.layer1(feature1) + feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + feature = feature5.clip(max=threshold) + feature = feature.view(feature.size(0), -1) + logits_cls = self.fc(feature) + + return logits_cls + + def intermediate_forward(self, x, layer_index): + out = self.relu(self.bn1(self.conv1(x))) + out = self.maxpool(out) + + out = self.layer1(out) + if layer_index == 1: + return out + + out = self.layer2(out) + if layer_index == 2: + return out + + out = self.layer3(out) + if layer_index == 3: + return out + + out = self.layer4(out) + if layer_index == 4: + return out + + raise ValueError + + def get_fc(self): + fc = self.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + + def get_fc_layer(self): + return self.fc diff --git a/OpenOOD/openood/networks/resnet18_256x256.py b/OpenOOD/openood/networks/resnet18_256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..d3ef5172e3f20e3ccb021cf882a29012ad96855c --- /dev/null +++ b/OpenOOD/openood/networks/resnet18_256x256.py @@ -0,0 +1,224 @@ +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(in_planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion * planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False), nn.BatchNorm2d(self.expansion * planes)) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class BasicBlock2(nn.Module): + expansion = 1 + + def __init__( + self, + in_planes: int, + planes: int, + stride: int = 1, + downsample=None, + ) -> None: + super(BasicBlock2, self).__init__() + self.conv1 = nn.Conv2d(in_planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.bn1 
= nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, + self.expansion * planes, + kernel_size=1, + bias=False) + self.bn3 = nn.BatchNorm2d(self.expansion * planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion * planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False), nn.BatchNorm2d(self.expansion * planes)) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class ResNet18_256x256(nn.Module): + def __init__(self, block=BasicBlock2, num_blocks=None, num_classes=10): + super(ResNet18_256x256, self).__init__() + if num_blocks is None: + num_blocks = [2, 2, 2, 2] + self.in_planes = 64 + self._norm_layer = nn.BatchNorm2d + self.conv1 = nn.Conv2d( + 3, + 64, + kernel_size=7, # origin 3 + stride=2, # origin 1 + padding=3, # origin 1 + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + # self.avgpool = nn.AvgPool2d(4) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, + padding=1) # origin no + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(512 * block.expansion, num_classes) + self.feature_size = 512 * block.expansion + + # origin no + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, + mode='fan_out', + nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, num_blocks, stride): + ''' + strides = [stride] + [1] * (num_blocks - 1) + layers = [] + for stride in strides: + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + ''' + norm_layer = self._norm_layer + downsample = None + if stride != 1 or self.in_planes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.in_planes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.in_planes, planes, stride, downsample)) + self.in_planes = planes * block.expansion + for _ in range(1, num_blocks): + layers.append(block(self.in_planes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x, return_feature=False, return_feature_list=False): + feature1 = self.maxpool(F.relu(self.bn1( + self.conv1(x)))) # origin no maxpool + feature2 = self.layer1(feature1) + feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + feature = feature5.view(feature5.size(0), -1) + logits_cls = self.fc(feature) + feature_list = [feature1, feature2, feature3, feature4, feature5] + if return_feature: + return logits_cls, feature + elif return_feature_list: + return logits_cls, feature_list + else: + return logits_cls + + def forward_threshold(self, x, threshold): + feature1 = F.relu(self.bn1(self.conv1(x))) + feature2 = self.layer1(feature1) + 
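+        # NB: unlike forward() above, this path skips self.maxpool after the
+        # stem; AdaptiveAvgPool2d absorbs the size mismatch, so the clip
+        # below operates on features from a larger spatial map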
feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + feature = feature5.clip(max=threshold) + feature = feature.view(feature.size(0), -1) + logits_cls = self.fc(feature) + + return logits_cls + + def get_fc(self): + fc = self.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() diff --git a/OpenOOD/openood/networks/resnet18_32x32.py b/OpenOOD/openood/networks/resnet18_32x32.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d4836304ba1b5c07e05d33a222aa0028e1e7df --- /dev/null +++ b/OpenOOD/openood/networks/resnet18_32x32.py @@ -0,0 +1,167 @@ +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(in_planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion * planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False), nn.BatchNorm2d(self.expansion * planes)) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, + self.expansion * planes, + kernel_size=1, + bias=False) + self.bn3 = nn.BatchNorm2d(self.expansion * planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion * planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False), nn.BatchNorm2d(self.expansion * planes)) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class ResNet18_32x32(nn.Module): + def __init__(self, block=BasicBlock, num_blocks=None, num_classes=10): + super(ResNet18_32x32, self).__init__() + if num_blocks is None: + num_blocks = [2, 2, 2, 2] + self.in_planes = 64 + + self.conv1 = nn.Conv2d(3, + 64, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + # self.avgpool = nn.AvgPool2d(4) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(512 * block.expansion, num_classes) + self.feature_size = 512 * block.expansion + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1] * (num_blocks - 1) + layers = [] + for stride in strides: + 
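# (editor's note) `strides` is [stride, 1, ..., 1]: only the first block of a
# stage downsamples or widens; the remaining blocks keep stride 1 and consume
# the expanded `in_planes` updated at the end of each loop iteration.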
layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, x, return_feature=False, return_feature_list=False): + feature1 = F.relu(self.bn1(self.conv1(x))) # bs,64,32,32 + feature2 = self.layer1(feature1) # bs,64,32,32 + feature3 = self.layer2(feature2) # bs,128,16,16 + feature4 = self.layer3(feature3) # bs,256,8,8 + feature5 = self.layer4(feature4) # bs,512,4,4 + feature5 = self.avgpool(feature5) # bs,512,1,1 + feature = feature5.view(feature5.size(0), -1) + logits_cls = self.fc(feature) + feature_list = [feature1, feature2, feature3, feature4, feature5] + if return_feature: + return logits_cls, feature + elif return_feature_list: + return logits_cls, feature_list + else: + return logits_cls + + def forward_threshold(self, x, threshold): + feature1 = F.relu(self.bn1(self.conv1(x))) + feature2 = self.layer1(feature1) + feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + feature = feature5.clip(max=threshold) + feature = feature.view(feature.size(0), -1) + logits_cls = self.fc(feature) + + return logits_cls + + def intermediate_forward(self, x, layer_index): + out = F.relu(self.bn1(self.conv1(x))) + + out = self.layer1(out) + if layer_index == 1: + return out + + out = self.layer2(out) + if layer_index == 2: + return out + + out = self.layer3(out) + if layer_index == 3: + return out + + out = self.layer4(out) + if layer_index == 4: + return out + + raise ValueError + + def get_fc(self): + fc = self.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + + def get_fc_layer(self): + return self.fc diff --git a/OpenOOD/openood/networks/resnet18_64x64.py b/OpenOOD/openood/networks/resnet18_64x64.py new file mode 100644 index 0000000000000000000000000000000000000000..ad23a69f8da7509b23dc49aad64c6fd3ae158775 --- /dev/null +++ b/OpenOOD/openood/networks/resnet18_64x64.py @@ -0,0 +1,142 @@ +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(in_planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion * planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False), nn.BatchNorm2d(self.expansion * planes)) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, + planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, + self.expansion * planes, + kernel_size=1, + bias=False) + self.bn3 = nn.BatchNorm2d(self.expansion * planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion * planes: + 
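# (editor's note) when the block changes spatial resolution or channel count,
# the identity shortcut is replaced by a 1x1 conv + BatchNorm projection so
# the residual addition in forward() stays shape-compatible.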
self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False), nn.BatchNorm2d(self.expansion * planes)) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class ResNet18_64x64(nn.Module): + def __init__(self, block=BasicBlock, num_blocks=None, num_classes=10): + super(ResNet18_64x64, self).__init__() + if num_blocks is None: + num_blocks = [2, 2, 2, 2] + self.in_planes = 64 + + self.conv1 = nn.Conv2d(3, + 64, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + self.avgpool = nn.AvgPool2d(8) + self.fc = nn.Linear(512, num_classes) + self.feature_size = 512 + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1] * (num_blocks - 1) + layers = [] + for stride in strides: + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, x, return_feature=False, return_feature_list=False): + feature1 = F.relu(self.bn1(self.conv1(x))) + feature2 = self.layer1(feature1) + feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + feature = feature5.view(feature5.size(0), -1) + logits_cls = self.fc(feature) + feature_list = [feature1, feature2, feature3, feature4, feature5] + if return_feature: + return logits_cls, feature + elif return_feature_list: + return logits_cls, feature_list + else: + return logits_cls + + def forward_threshold(self, x, threshold): + feature1 = F.relu(self.bn1(self.conv1(x))) + feature2 = self.layer1(feature1) + feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + feature = feature5.clip(max=threshold) + feature = feature.view(feature.size(0), -1) + logits_cls = self.fc(feature) + + return logits_cls + + def get_fc(self): + fc = self.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() diff --git a/OpenOOD/openood/networks/resnet50.py b/OpenOOD/openood/networks/resnet50.py new file mode 100644 index 0000000000000000000000000000000000000000..5c6d85f28d67f246632ddadb0ef8c024b18dd247 --- /dev/null +++ b/OpenOOD/openood/networks/resnet50.py @@ -0,0 +1,74 @@ +from torchvision.models.resnet import Bottleneck, ResNet + + +class ResNet50(ResNet): + def __init__(self, + block=Bottleneck, + layers=[3, 4, 6, 3], + num_classes=1000): + super(ResNet50, self).__init__(block=block, + layers=layers, + num_classes=num_classes) + self.feature_size = 2048 + + def forward(self, x, return_feature=False, return_feature_list=False): + feature1 = self.relu(self.bn1(self.conv1(x))) + feature1 = self.maxpool(feature1) + feature2 = self.layer1(feature1) + feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + feature = feature5.view(feature5.size(0), -1) + logits_cls = self.fc(feature) + + feature_list = [feature1, feature2, feature3, feature4, feature5] + if return_feature: + 
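# (editor's note) `feature` below is the pooled penultimate embedding;
# feature-based OOD postprocessors obtain it with return_feature=True, e.g.
# (hypothetical call) `logits, feat = net(x, return_feature=True)`.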
return logits_cls, feature + elif return_feature_list: + return logits_cls, feature_list + else: + return logits_cls + + def forward_threshold(self, x, threshold): + feature1 = self.relu(self.bn1(self.conv1(x))) + feature1 = self.maxpool(feature1) + feature2 = self.layer1(feature1) + feature3 = self.layer2(feature2) + feature4 = self.layer3(feature3) + feature5 = self.layer4(feature4) + feature5 = self.avgpool(feature5) + feature = feature5.clip(max=threshold) + feature = feature.view(feature.size(0), -1) + logits_cls = self.fc(feature) + + return logits_cls + + def intermediate_forward(self, x, layer_index): + out = self.relu(self.bn1(self.conv1(x))) + out = self.maxpool(out) + + out = self.layer1(out) + if layer_index == 1: + return out + + out = self.layer2(out) + if layer_index == 2: + return out + + out = self.layer3(out) + if layer_index == 3: + return out + + out = self.layer4(out) + if layer_index == 4: + return out + + raise ValueError + + def get_fc(self): + fc = self.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + + def get_fc_layer(self): + return self.fc diff --git a/OpenOOD/openood/networks/rot_net.py b/OpenOOD/openood/networks/rot_net.py new file mode 100644 index 0000000000000000000000000000000000000000..375331f91dbf239bd06e7a847d934167f1941093 --- /dev/null +++ b/OpenOOD/openood/networks/rot_net.py @@ -0,0 +1,31 @@ +import torch.nn as nn + + +class RotNet(nn.Module): + def __init__(self, backbone, num_classes): + super(RotNet, self).__init__() + + self.backbone = backbone + if hasattr(self.backbone, 'fc'): + # remove fc otherwise ddp will + # report unused params + self.backbone.fc = nn.Identity() + + try: + feature_size = backbone.feature_size + except AttributeError: + feature_size = backbone.module.feature_size + + self.fc = nn.Linear(feature_size, num_classes) + self.rot_fc = nn.Linear(feature_size, 4) + + def forward(self, x, return_rot_logits=False): + _, feature = self.backbone(x, return_feature=True) + + logits = self.fc(feature) + rot_logits = self.rot_fc(feature) + + if return_rot_logits: + return logits, rot_logits + else: + return logits diff --git a/OpenOOD/openood/networks/rts_net.py b/OpenOOD/openood/networks/rts_net.py new file mode 100644 index 0000000000000000000000000000000000000000..7a74cc58d732574445078a837a303827264eae99 --- /dev/null +++ b/OpenOOD/openood/networks/rts_net.py @@ -0,0 +1,29 @@ +import torch +import torch.nn as nn + + +class RTSNet(nn.Module): + def __init__(self, backbone, feature_size, num_classes, + dof=16): + ''' + dof: degree of freedom of variance + ''' + super(RTSNet, self).__init__() + self.backbone = backbone + self.feature_size = feature_size + self.num_classes = num_classes + self.dof = dof + self.logvar_rts = nn.Sequential( + nn.Linear(feature_size, self.dof), + nn.BatchNorm1d(self.dof), + ) + + def forward(self, x, return_var=False): + logits_cls, feature = self.backbone(x, return_feature=True) + if return_var: + logvar = self.logvar_rts(feature) + variance = logvar.exp() + return logits_cls, variance + else: + return logits_cls + \ No newline at end of file diff --git a/OpenOOD/openood/networks/scale_net.py b/OpenOOD/openood/networks/scale_net.py new file mode 100644 index 0000000000000000000000000000000000000000..aaee34041ade6f0194f82dd0cc3b906bb8174e60 --- /dev/null +++ b/OpenOOD/openood/networks/scale_net.py @@ -0,0 +1,49 @@ +import numpy as np +import torch +import torch.nn as nn + + +class ScaleNet(nn.Module): + def __init__(self, backbone): + super(ScaleNet, self).__init__() + 
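# (editor's sketch) ScaleNet itself is a thin wrapper: the OOD-specific step
# lives in forward_threshold below, which reshapes the penultimate feature,
# applies the percentile-based `scale` transform defined at the bottom of
# this file, and re-uses the backbone's fc layer. Hypothetical usage,
# assuming a ResNet18_32x32 backbone and an energy-style score:
#   net = ScaleNet(ResNet18_32x32(num_classes=10))
#   logits = net.forward_threshold(x, percentile=85)
#   score = torch.logsumexp(logits, dim=1)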
self.backbone = backbone + + def forward(self, x, return_feature=False, return_feature_list=False): + try: + return self.backbone(x, return_feature, return_feature_list) + except TypeError: + return self.backbone(x, return_feature) + + def forward_threshold(self, x, percentile): + _, feature = self.backbone(x, return_feature=True) + feature = scale(feature.view(feature.size(0), -1, 1, 1), percentile) + feature = feature.view(feature.size(0), -1) + logits_cls = self.backbone.get_fc_layer()(feature) + return logits_cls + + def get_fc(self): + fc = self.backbone.fc + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + + +def scale(x, percentile=65): + input = x.clone() + assert x.dim() == 4 + assert 0 <= percentile <= 100 + b, c, h, w = x.shape + + # calculate the sum of the input per sample + s1 = x.sum(dim=[1, 2, 3]) + n = x.shape[1:].numel() + k = n - int(np.round(n * percentile / 100.0)) + t = x.view((b, c * h * w)) + v, i = torch.topk(t, k, dim=1) + t.zero_().scatter_(dim=1, index=i, src=v) + + # calculate new sum of the input per sample after pruning + s2 = x.sum(dim=[1, 2, 3]) + + # apply sharpening + scale = s1 / s2 + + return input * torch.exp(scale[:, None, None, None]) diff --git a/OpenOOD/openood/networks/simclr_net.py b/OpenOOD/openood/networks/simclr_net.py new file mode 100644 index 0000000000000000000000000000000000000000..f7676b5d8bb3d3c40a079b01d9e4cbc466a1fa35 --- /dev/null +++ b/OpenOOD/openood/networks/simclr_net.py @@ -0,0 +1,20 @@ +import torch.nn as nn +import torch.nn.functional as F + + +class SimClrNet(nn.Module): + def __init__(self, backbone, out_dim=128) -> None: + super(SimClrNet, self).__init__() + + self.backbone = backbone + feature_dim = backbone.feature_size + self.simclr_head = nn.Sequential( + nn.Linear(feature_dim, feature_dim), + nn.ReLU(inplace=True), + nn.Linear(feature_dim, out_dim) + ) + + def forward(self, x, return_feature=False, return_feature_list=False): + _, feature = self.backbone.forward(x, return_feature=True) + + return _, [F.normalize(self.simclr_head(feature), dim=-1)] \ No newline at end of file diff --git a/OpenOOD/openood/networks/swin_t.py b/OpenOOD/openood/networks/swin_t.py new file mode 100644 index 0000000000000000000000000000000000000000..dc2eaf1e33fe1a30cfe40d7440131bb4b85f27cb --- /dev/null +++ b/OpenOOD/openood/networks/swin_t.py @@ -0,0 +1,52 @@ +from torchvision.models.swin_transformer import SwinTransformer + + +class Swin_T(SwinTransformer): + def __init__(self, + patch_size=[4, 4], + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=[7, 7], + stochastic_depth_prob=0.2, + num_classes=1000): + super(Swin_T, + self).__init__(patch_size=patch_size, + embed_dim=embed_dim, + depths=depths, + num_heads=num_heads, + window_size=window_size, + stochastic_depth_prob=stochastic_depth_prob, + num_classes=num_classes) + self.feature_size = embed_dim * 2**(len(depths) - 1) + + def forward(self, x, return_feature=False): + x = self.features(x) + x = self.norm(x) + x = self.permute(x) + x = self.avgpool(x) + x = self.flatten(x) + + if return_feature: + return self.head(x), x + else: + return self.head(x) + + def forward_threshold(self, x, threshold): + x = self.features(x) + x = self.norm(x) + x = self.permute(x) + x = self.avgpool(x) + x = self.flatten(x) + feature = x.clip(max=threshold) + feature = feature.view(feature.size(0), -1) + logits_cls = self.head(feature) + + return logits_cls + + def get_fc(self): + fc = self.head + return fc.weight.cpu().detach().numpy(), 
fc.bias.cpu().detach().numpy() + + def get_fc_layer(self): + return self.head diff --git a/OpenOOD/openood/networks/t2fnorm_net.py b/OpenOOD/openood/networks/t2fnorm_net.py new file mode 100644 index 0000000000000000000000000000000000000000..27b5f8e54e643ae454ee3cfd421d6d265805845e --- /dev/null +++ b/OpenOOD/openood/networks/t2fnorm_net.py @@ -0,0 +1,36 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +class T2FNormNet(nn.Module): + def __init__(self, backbone, tau, num_classes): + super(T2FNormNet, self).__init__() + + self.register_buffer('tau', torch.tensor(tau)) + self.backbone = backbone + if hasattr(self.backbone, 'fc'): + self.backbone.fc = nn.Identity() + + try: + feature_size = backbone.feature_size + except AttributeError: + feature_size = backbone.module.feature_size + + self.new_fc = nn.Linear(feature_size, num_classes) + + def forward(self, x, return_feature=False): + penultimate_features = self.backbone(x) + if self.training: + features = F.normalize(penultimate_features, dim=-1) / self.tau.item() + else: + features = penultimate_features / self.tau.item() + + logits_cls = self.new_fc(features) + if return_feature: + return logits_cls, penultimate_features + else: + return logits_cls + + def intermediate_forward(self, x): + penultimate_features = self.backbone(x).squeeze() + return penultimate_features diff --git a/OpenOOD/openood/networks/temp.py b/OpenOOD/openood/networks/temp.py new file mode 100644 index 0000000000000000000000000000000000000000..0feffc2f00f0c437a585efdcf569cadf302ae88a --- /dev/null +++ b/OpenOOD/openood/networks/temp.py @@ -0,0 +1,194 @@ +"""ResNet in PyTorch. +ImageNet-Style ResNet +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 +Adapted from: https://github.com/bearpaw/pytorch-classification +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1, is_last=False): + super(BasicBlock, self).__init__() + self.is_last = is_last + self.conv1 = nn.Conv2d( + in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False + ) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=1, padding=1, bias=False + ) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion * planes: + self.shortcut = nn.Sequential( + nn.Conv2d( + in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False, + ), + nn.BatchNorm2d(self.expansion * planes), + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + out += self.shortcut(x) + preact = out + out = F.relu(out) + if self.is_last: + return out, preact + else: + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, in_planes, planes, stride=1, is_last=False): + super(Bottleneck, self).__init__() + self.is_last = is_last + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=stride, padding=1, bias=False + ) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, self.expansion * planes, kernel_size=1, bias=False + ) + self.bn3 = nn.BatchNorm2d(self.expansion * planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion * planes: + 
self.shortcut = nn.Sequential( + nn.Conv2d( + in_planes, + self.expansion * planes, + kernel_size=1, + stride=stride, + bias=False, + ), + nn.BatchNorm2d(self.expansion * planes), + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + preact = out + out = F.relu(out) + if self.is_last: + return out, preact + else: + return out + + +class ResNet(nn.Module): + def __init__(self, block, num_blocks, in_channel=3, zero_init_residual=False): + super(ResNet, self).__init__() + self.in_planes = 64 + + self.conv1 = nn.Conv2d( + in_channel, 64, kernel_size=3, stride=1, padding=1, bias=False + ) + self.bn1 = nn.BatchNorm2d(64) + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves + # like an identity. This improves the model by 0.2~0.3% according to: + # https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1] * (num_blocks - 1) + layers = [] + for i in range(num_blocks): + stride = strides[i] + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def forward(self, x, layer=100): + out = F.relu(self.bn1(self.conv1(x))) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + out = self.avgpool(out) + out = torch.flatten(out, 1) + return out + + +def resnet18(**kwargs): + return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + + +def resnet34(**kwargs): + return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) + + +def resnet50(**kwargs): + return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + + +def resnet101(**kwargs): + return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + + +model_dict = { + "resnet18": [resnet18, 512], + "resnet34": [resnet34, 512], + "resnet50": [resnet50, 2048], + "resnet101": [resnet101, 2048], +} + + +class SupResNet(nn.Module): + def __init__(self, arch="resnet50", num_classes=10, **kwargs): + super(SupResNet, self).__init__() + m, fdim = model_dict[arch] + self.encoder = m() + self.head = nn.Linear(fdim, num_classes) + + def forward(self, x): + return self.head(self.encoder(x)) + + +class SSLResNet(nn.Module): + def __init__(self, arch="resnet50", out_dim=128, **kwargs): + super(SSLResNet, self).__init__() + m, fdim = model_dict[arch] + self.encoder = m() + self.head = nn.Sequential( + nn.Linear(fdim, fdim), nn.ReLU(inplace=True), nn.Linear(fdim, out_dim) + ) + + def forward(self, x, return_feature=False, return_feature_list=False): + temp = F.normalize(self.head(self.encoder(x)), dim=-1) + return temp, [temp] diff --git 
a/OpenOOD/openood/networks/udg_net.py b/OpenOOD/openood/networks/udg_net.py new file mode 100644 index 0000000000000000000000000000000000000000..ff645ce7a99434239e3949877df83aabeabf0e6c --- /dev/null +++ b/OpenOOD/openood/networks/udg_net.py @@ -0,0 +1,29 @@ +import torch.nn as nn + + +class UDGNet(nn.Module): + def __init__(self, backbone, num_classes, num_clusters): + super(UDGNet, self).__init__() + self.backbone = backbone + if hasattr(self.backbone, 'fc'): + # remove fc otherwise ddp will + # report unused params + self.backbone.fc = nn.Identity() + self.fc = nn.Linear(backbone.feature_size, num_classes) + self.fc_aux = nn.Linear(backbone.feature_size, num_clusters) + + def forward(self, x, return_feature=False, return_aux=False): + _, feature = self.backbone(x, return_feature=True) + logits_cls = self.fc(feature) + logits_aux = self.fc_aux(feature) + + if return_aux: + if return_feature: + return logits_cls, logits_aux, feature + else: + return logits_cls, logits_aux + else: + if return_feature: + return logits_cls, feature + else: + return logits_cls diff --git a/OpenOOD/openood/networks/utils.py b/OpenOOD/openood/networks/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e19bf05162a1432d2d57333c0fee8d037178f5da --- /dev/null +++ b/OpenOOD/openood/networks/utils.py @@ -0,0 +1,420 @@ +# import mmcv +from copy import deepcopy +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +# from mmcls.apis import init_model + +import openood.utils.comm as comm + +from .bit import KNOWN_MODELS +from .conf_branch_net import ConfBranchNet +from .csi_net import get_csi_linear_layers, CSINet +from .cider_net import CIDERNet +from .t2fnorm_net import T2FNormNet +from .de_resnet18_256x256 import AttnBasicBlock, BN_layer, De_ResNet18_256x256 +from .densenet import DenseNet3 +from .draem_net import DiscriminativeSubNetwork, ReconstructiveSubNetwork +from .dropout_net import DropoutNet +from .dsvdd_net import build_network +from .godin_net import GodinNet +from .lenet import LeNet +from .mcd_net import MCDNet +from .npos_net import NPOSNet +from .openmax_net import OpenMax +from .patchcore_net import PatchcoreNet +from .projection_net import ProjectionNet +from .react_net import ReactNet +from .resnet18_32x32 import ResNet18_32x32 +from .resnet18_64x64 import ResNet18_64x64 +from .resnet18_224x224 import ResNet18_224x224 +from .resnet18_256x256 import ResNet18_256x256 +from .resnet50 import ResNet50 +from .rot_net import RotNet +from .udg_net import UDGNet +from .vit_b_16 import ViT_B_16 +from .wrn import WideResNet +from .rts_net import RTSNet +from .model_bronze import AKG + +def get_network(network_config): + + num_classes = network_config.num_classes + + if hasattr(network_config, 'modification') and network_config.modification == 't2fnorm': + network_config.modification = 'none' + backbone = get_network(network_config) + backbone.fc = nn.Identity() + + net = T2FNormNet(backbone=backbone, + tau=network_config.tau, + num_classes=num_classes) + + elif network_config.name == 'resnet18_32x32': + net = ResNet18_32x32(num_classes=num_classes) + + elif network_config.name == 'resnet18_256x256': + net = ResNet18_256x256(num_classes=num_classes) + + elif network_config.name == 'resnet18_64x64': + net = ResNet18_64x64(num_classes=num_classes) + + elif network_config.name == 'resnet18_224x224': + net = ResNet18_224x224(num_classes=num_classes) + + elif network_config.name == 'resnet50': + net = ResNet50(num_classes=num_classes) + + elif 
network_config.name == 'OursBronze2': + # backbone = get_network(network_config.backbone) + # net = AKG("bronze", backbone, 1024) + + net = AKG("bronze", ResNet50(num_classes=num_classes), 1024) + # model_path = network_config.model_config.model_path + # net.load_state_dict(torch.load(model_path)) + + elif network_config.name == 'lenet': + net = LeNet(num_classes=num_classes, num_channel=3) + + elif network_config.name == 'wrn': + net = WideResNet(depth=28, + widen_factor=10, + dropRate=0.0, + num_classes=num_classes) + + elif network_config.name == 'densenet': + net = DenseNet3(depth=100, + growth_rate=12, + reduction=0.5, + bottleneck=True, + dropRate=0.0, + num_classes=num_classes) + + elif network_config.name == 'patchcore_net': + # path = '/home/pengyunwang/.cache/torch/hub/vision-0.9.0' + # module = torch.hub._load_local(path, + # 'wide_resnet50_2', + # pretrained=True) + backbone = get_network(network_config.backbone) + net = PatchcoreNet(backbone) + elif network_config.name == 'wide_resnet_50_2': + module = torch.hub.load('pytorch/vision:v0.9.0', + 'wide_resnet50_2', + pretrained=True) + net = PatchcoreNet(module) + + elif network_config.name == 'godin_net': + # don't wrap ddp here cuz we need to modify + # backbone + network_config.backbone.num_gpus = 1 + backbone = get_network(network_config.backbone) + feature_size = backbone.feature_size + # remove fc otherwise ddp will + # report unused params + backbone.fc = nn.Identity() + + net = GodinNet(backbone=backbone, + feature_size=feature_size, + num_classes=num_classes, + similarity_measure=network_config.similarity_measure) + + elif network_config.name == 'cider_net': + # don't wrap ddp here cuz we need to modify + # backbone + network_config.backbone.num_gpus = 1 + backbone = get_network(network_config.backbone) + # remove fc otherwise ddp will + # report unused params + backbone.fc = nn.Identity() + + net = CIDERNet(backbone=backbone, + head=network_config.head, + feat_dim=network_config.feat_dim, + num_classes=num_classes) + + elif network_config.name == 'npos_net': + # don't wrap ddp here cuz we need to modify + # backbone + network_config.backbone.num_gpus = 1 + backbone = get_network(network_config.backbone) + # remove fc otherwise ddp will + # report unused params + backbone.fc = nn.Identity() + + net = NPOSNet(backbone=backbone, + head=network_config.head, + feat_dim=network_config.feat_dim, + num_classes=num_classes) + + elif network_config.name == 'rts_net': + backbone = get_network(network_config.backbone) + try: + feature_size = backbone.feature_size + except AttributeError: + feature_size = backbone.module.feature_size + net = RTSNet(backbone=backbone, + feature_size=feature_size, + num_classes=num_classes, + dof=network_config.dof) + + elif network_config.name == 'react_net': + backbone = get_network(network_config.backbone) + net = ReactNet(backbone) + + elif network_config.name == 'csi_net': + # don't wrap ddp here cuz we need to modify + # backbone + network_config.backbone.num_gpus = 1 + backbone = get_network(network_config.backbone) + feature_size = backbone.feature_size + # remove fc otherwise ddp will + # report unused params + backbone.fc = nn.Identity() + + net = get_csi_linear_layers(feature_size, num_classes, + network_config.simclr_dim, + network_config.shift_trans_type) + net['backbone'] = backbone + + dummy_net = CSINet(deepcopy(backbone), + feature_size=feature_size, + num_classes=num_classes, + simclr_dim=network_config.simclr_dim, + shift_trans_type=network_config.shift_trans_type) + 
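# (editor's note) for CSI, `net` is a dict of auxiliary linear heads keyed by
# name, with the backbone stored under 'backbone'; the CSINet built above is
# kept as 'dummy_net', presumably so later stages can run the assembled
# network as a single module. get_network can therefore return either an
# nn.Module or a dict of modules -- the checkpoint-loading and DDP code at
# the end of this function handles both shapes.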
net['dummy_net'] = dummy_net + + elif network_config.name == 'draem': + model = ReconstructiveSubNetwork(in_channels=3, + out_channels=3, + base_width=int( + network_config.image_size / 2)) + model_seg = DiscriminativeSubNetwork( + in_channels=6, + out_channels=2, + base_channels=int(network_config.image_size / 4)) + + net = {'generative': model, 'discriminative': model_seg} + + elif network_config.name == 'openmax_network': + backbone = get_network(network_config.backbone) + net = OpenMax(backbone=backbone, num_classes=num_classes) + + elif network_config.name == 'mcd': + # don't wrap ddp here cuz we need to modify + # backbone + network_config.backbone.num_gpus = 1 + backbone = get_network(network_config.backbone) + feature_size = backbone.feature_size + # remove fc otherwise ddp will + # report unused params + backbone.fc = nn.Identity() + + net = MCDNet(backbone=backbone, num_classes=num_classes) + + elif network_config.name == 'udg': + # don't wrap ddp here cuz we need to modify + # backbone + network_config.backbone.num_gpus = 1 + backbone = get_network(network_config.backbone) + feature_size = backbone.feature_size + # remove fc otherwise ddp will + # report unused params + backbone.fc = nn.Identity() + + net = UDGNet(backbone=backbone, + num_classes=num_classes, + num_clusters=network_config.num_clusters) + + elif network_config.name == 'opengan': + from .opengan import Discriminator, Generator + backbone = get_network(network_config.backbone) + netG = Generator(in_channels=network_config.nz, + feature_size=network_config.ngf, + out_channels=network_config.nc) + netD = Discriminator(in_channels=network_config.nc, + feature_size=network_config.ndf) + + net = {'netG': netG, 'netD': netD, 'backbone': backbone} + + elif network_config.name == 'arpl_gan': + from .arpl_net import (resnet34ABN, Generator, Discriminator, + Generator32, Discriminator32, ARPLayer) + feature_net = resnet34ABN(num_classes=num_classes, num_bns=2) + dim_centers = feature_net.fc.weight.shape[1] + feature_net.fc = nn.Identity() + + criterion = ARPLayer(feat_dim=dim_centers, + num_classes=num_classes, + weight_pl=network_config.weight_pl, + temp=network_config.temp) + + assert network_config.image_size == 32 \ + or network_config.image_size == 64, \ + 'ARPL-GAN only supports 32x32 or 64x64 images!' 
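# (editor's note) the two branches below pick matching DCGAN-style
# generator/discriminator pairs: Generator/Discriminator for 64x64 inputs and
# Generator32/Discriminator32 for 32x32; any other resolution is rejected by
# the assert above.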
+ + if network_config.image_size == 64: + netG = Generator(1, network_config.nz, network_config.ngf, + network_config.nc) # ngpu, nz, ngf, nc + netD = Discriminator(1, network_config.nc, + network_config.ndf) # ngpu, nc, ndf + else: + netG = Generator32(1, network_config.nz, network_config.ngf, + network_config.nc) # ngpu, nz, ngf, nc + netD = Discriminator32(1, network_config.nc, + network_config.ndf) # ngpu, nc, ndf + + net = { + 'netF': feature_net, + 'criterion': criterion, + 'netG': netG, + 'netD': netD + } + + elif network_config.name == 'arpl_net': + from .arpl_net import ARPLayer + # don't wrap ddp here because we need to modify + # feature_net + network_config.feat_extract_network.num_gpus = 1 + feature_net = get_network(network_config.feat_extract_network) + try: + if isinstance(feature_net, nn.parallel.DistributedDataParallel): + dim_centers = feature_net.module.fc.weight.shape[1] + feature_net.module.fc = nn.Identity() + else: + dim_centers = feature_net.fc.weight.shape[1] + feature_net.fc = nn.Identity() + except Exception: + if isinstance(feature_net, nn.parallel.DistributedDataParallel): + dim_centers = feature_net.module.classifier[0].weight.shape[1] + feature_net.module.classifier = nn.Identity() + else: + dim_centers = feature_net.classifier[0].weight.shape[1] + feature_net.classifier = nn.Identity() + + criterion = ARPLayer(feat_dim=dim_centers, + num_classes=num_classes, + weight_pl=network_config.weight_pl, + temp=network_config.temp) + + net = {'netF': feature_net, 'criterion': criterion} + + elif network_config.name == 'bit': + net = KNOWN_MODELS[network_config.model]( + head_size=network_config.num_logits, + zero_head=True, + num_block_open=network_config.num_block_open) + + elif network_config.name == 'vit-b-16': + net = ViT_B_16(num_classes=num_classes) + + elif network_config.name == 'conf_branch_net': + # don't wrap ddp here cuz we need to modify + # backbone + network_config.backbone.num_gpus = 1 + backbone = get_network(network_config.backbone) + feature_size = backbone.feature_size + # remove fc otherwise ddp will + # report unused params + backbone.fc = nn.Identity() + + net = ConfBranchNet(backbone=backbone, num_classes=num_classes) + + elif network_config.name == 'rot_net': + # don't wrap ddp here cuz we need to modify + # backbone + network_config.backbone.num_gpus = 1 + backbone = get_network(network_config.backbone) + feature_size = backbone.feature_size + # remove fc otherwise ddp will + # report unused params + backbone.fc = nn.Identity() + + net = RotNet(backbone=backbone, num_classes=num_classes) + + elif network_config.name == 'dsvdd': + net = build_network(network_config.type) + + elif network_config.name == 'projectionNet': + backbone = get_network(network_config.backbone) + net = ProjectionNet(backbone=backbone, num_classes=2) + + elif network_config.name == 'dropout_net': + backbone = get_network(network_config.backbone) + net = DropoutNet(backbone=backbone, dropout_p=network_config.dropout_p) + + elif network_config.name == 'simclr_net': + # backbone = get_network(network_config.backbone) + # net = SimClrNet(backbone, out_dim=128) + from .temp import SSLResNet + net = SSLResNet() + net.encoder = nn.DataParallel(net.encoder).cuda() + + elif network_config.name == 'rd4ad_net': + encoder = get_network(network_config.backbone) + bn = BN_layer(AttnBasicBlock, 2) + decoder = De_ResNet18_256x256() + net = {'encoder': encoder, 'bn': bn, 'decoder': decoder} + else: + raise Exception('Unexpected Network Architecture!') + + if network_config.pretrained: 
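# (editor's summary of the branches below) dict-valued nets accept either a
# list of per-sub-network checkpoints or a single combined state_dict whose
# keys are prefixed with the sub-network name; single-module nets fall back
# to a plain load_state_dict, retrying without 'fc.weight'/'fc.bias' when
# the classifier head does not match the checkpoint.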
+ if type(net) is dict: + if isinstance(network_config.checkpoint, list): + for subnet, checkpoint in zip(net.values(), + network_config.checkpoint): + if checkpoint is not None and checkpoint != 'none': + subnet.load_state_dict(torch.load(checkpoint), + strict=False) + elif isinstance(network_config.checkpoint, str): + ckpt = torch.load(network_config.checkpoint) + subnet_ckpts = {k: {} for k in net.keys()} + for k, v in ckpt.items(): + for subnet_name in net.keys(): + if k.startswith(subnet_name): + subnet_ckpts[subnet_name][k.replace( + subnet_name + '.', '')] = v + break + + for subnet_name, subnet in net.items(): + subnet.load_state_dict(subnet_ckpts[subnet_name]) + + elif network_config.name == 'bit' and not network_config.normal_load: + net.load_from(np.load(network_config.checkpoint)) + elif network_config.name == 'vit': + pass + else: + try: + net.load_state_dict(torch.load(network_config.checkpoint), + strict=False) + except RuntimeError: + # sometimes fc should not be loaded + loaded_pth = torch.load(network_config.checkpoint) + loaded_pth.pop('fc.weight') + loaded_pth.pop('fc.bias') + net.load_state_dict(loaded_pth, strict=False) + print('Model Loading {} Completed!'.format(network_config.name)) + + if network_config.num_gpus > 1: + if type(net) is dict: + for key, subnet in zip(net.keys(), net.values()): + net[key] = torch.nn.parallel.DistributedDataParallel( + subnet.cuda(), + device_ids=[comm.get_local_rank()], + broadcast_buffers=True) + else: + net = torch.nn.parallel.DistributedDataParallel( + net.cuda(), + device_ids=[comm.get_local_rank()], + broadcast_buffers=True) + + if network_config.num_gpus > 0: + if type(net) is dict: + for subnet in net.values(): + subnet.cuda() + else: + net.cuda() + + cudnn.benchmark = True + return net diff --git a/OpenOOD/openood/networks/vit.py b/OpenOOD/openood/networks/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..ea0c59ca17e21f4117a4c6a93d1d86ab3af02897 --- /dev/null +++ b/OpenOOD/openood/networks/vit.py @@ -0,0 +1,22 @@ +# model settings +model = dict(type='ImageClassifierWithReturnFeature', + backbone=dict(type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict(type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='classy_vision'), + )) diff --git a/OpenOOD/openood/networks/vit_b_16.py b/OpenOOD/openood/networks/vit_b_16.py new file mode 100644 index 0000000000000000000000000000000000000000..c7ed1ba880bf0a7222ba118030267354389b2717 --- /dev/null +++ b/OpenOOD/openood/networks/vit_b_16.py @@ -0,0 +1,66 @@ +import torch +from torchvision.models.vision_transformer import VisionTransformer + + +class ViT_B_16(VisionTransformer): + def __init__(self, + image_size=224, + patch_size=16, + num_layers=12, + num_heads=12, + hidden_dim=768, + mlp_dim=3072, + num_classes=1000): + super(ViT_B_16, self).__init__(image_size=image_size, + patch_size=patch_size, + num_layers=num_layers, + num_heads=num_heads, + hidden_dim=hidden_dim, + mlp_dim=mlp_dim, + num_classes=num_classes) + self.feature_size = hidden_dim + + def forward(self, x, return_feature=False): + # Reshape and permute the input tensor + x = self._process_input(x) + n = x.shape[0] + + # Expand the class token to the full batch + batch_class_token = self.class_token.expand(n, -1, -1) + x =
torch.cat([batch_class_token, x], dim=1) + + x = self.encoder(x) + + # Classifier "token" as used by standard language architectures + x = x[:, 0] + + if return_feature: + return self.heads(x), x + else: + return self.heads(x) + + def forward_threshold(self, x, threshold): + # Reshape and permute the input tensor + x = self._process_input(x) + n = x.shape[0] + + # Expand the class token to the full batch + batch_class_token = self.class_token.expand(n, -1, -1) + x = torch.cat([batch_class_token, x], dim=1) + + x = self.encoder(x) + + # Classifier "token" as used by standard language architectures + x = x[:, 0] + + feature = x.clip(max=threshold) + logits_cls = self.heads(feature) + + return logits_cls + + def get_fc(self): + fc = self.heads[0] + return fc.weight.cpu().detach().numpy(), fc.bias.cpu().detach().numpy() + + def get_fc_layer(self): + return self.heads[0] diff --git a/OpenOOD/openood/networks/wrn.py b/OpenOOD/openood/networks/wrn.py new file mode 100644 index 0000000000000000000000000000000000000000..79d25f299e9463f6b8dab5e040f61c3c14c5cd2c --- /dev/null +++ b/OpenOOD/openood/networks/wrn.py @@ -0,0 +1,156 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BasicBlock(nn.Module): + def __init__(self, in_planes, out_planes, stride, dropRate=0.0): + super(BasicBlock, self).__init__() + self.bn1 = nn.BatchNorm2d(in_planes) + self.relu1 = nn.ReLU(inplace=True) + self.conv1 = nn.Conv2d(in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(out_planes) + self.relu2 = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(out_planes, + out_planes, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.droprate = dropRate + self.equalInOut = (in_planes == out_planes) + self.convShortcut = (not self.equalInOut) and nn.Conv2d( + in_planes, + out_planes, + kernel_size=1, + stride=stride, + padding=0, + bias=False) or None + + def forward(self, x): + if not self.equalInOut: + x = self.relu1(self.bn1(x)) + else: + out = self.relu1(self.bn1(x)) + if self.equalInOut: + out = self.relu2(self.bn2(self.conv1(out))) + else: + out = self.relu2(self.bn2(self.conv1(x))) + if self.droprate > 0: + out = F.dropout(out, p=self.droprate, training=self.training) + out = self.conv2(out) + if not self.equalInOut: + return torch.add(self.convShortcut(x), out) + else: + return torch.add(x, out) + + +class NetworkBlock(nn.Module): + def __init__(self, + nb_layers, + in_planes, + out_planes, + block, + stride, + dropRate=0.0): + super(NetworkBlock, self).__init__() + self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, + stride, dropRate) + + def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, + dropRate): + layers = [] + for i in range(nb_layers): + layers.append( + block(i == 0 and in_planes or out_planes, out_planes, + i == 0 and stride or 1, dropRate)) + return nn.Sequential(*layers) + + def forward(self, x): + return self.layer(x) + + +class WideResNet(nn.Module): + def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0): + super(WideResNet, self).__init__() + nChannels = [ + 16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor + ] + assert ((depth - 4) % 6 == 0) + n = (depth - 4) // 6 + block = BasicBlock + # 1st conv before any network block + self.conv1 = nn.Conv2d(3, + nChannels[0], + kernel_size=3, + stride=1, + padding=1, + bias=False) + # 1st block + self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, + dropRate) + # 2nd 
block + self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, + dropRate) + # 3rd block + self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, + dropRate) + # global average pooling and classifier + self.bn1 = nn.BatchNorm2d(nChannels[3]) + self.relu = nn.ReLU(inplace=True) + self.fc = nn.Linear(nChannels[3], num_classes) + self.nChannels = nChannels[3] + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + m.bias.data.zero_() + + def forward(self, x, return_feature=False): + feature1 = self.conv1(x) + feature2 = self.block1(feature1) + feature3 = self.block2(feature2) + feature4 = self.block3(feature3) + feature5 = self.relu(self.bn1(feature4)) + out = F.avg_pool2d(feature5, 8) + feature = out.view(-1, self.nChannels) + logits_cls = self.fc(feature) + feature_list = [ + feature, feature1, feature2, feature3, feature4, feature5 + ] + if return_feature: + return logits_cls, feature_list + else: + return logits_cls + + def intermediate_forward(self, x, layer_index): + out = self.conv1(x) + out = self.block1(out) + out = self.block2(out) + out = self.block3(out) + out = self.relu(self.bn1(out)) + return out + + def feature_list(self, x): + out_list = [] + out = self.conv1(x) + out = self.block1(out) + out = self.block2(out) + out = self.block3(out) + out = self.relu(self.bn1(out)) + out_list.append(out) + out = F.avg_pool2d(out, 8) + out = out.view(-1, self.nChannels) + return self.fc(out), out_list diff --git a/OpenOOD/openood/pipelines/__init__.py b/OpenOOD/openood/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e4b58a39ddf76fba1cdacbbdaebede71aeca33db --- /dev/null +++ b/OpenOOD/openood/pipelines/__init__.py @@ -0,0 +1 @@ +from .utils import get_pipeline diff --git a/OpenOOD/openood/pipelines/feat_extract_opengan_pipeline.py b/OpenOOD/openood/pipelines/feat_extract_opengan_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..f46303aa0ff2a2643f54c64caca7eb99bf9f55e4 --- /dev/null +++ b/OpenOOD/openood/pipelines/feat_extract_opengan_pipeline.py @@ -0,0 +1,45 @@ +from openood.datasets import get_dataloader, get_ood_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.utils import setup_logger + + +class FeatExtractOpenGANPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + id_loader_dict = get_dataloader(self.config) + ood_loader_dict = get_ood_dataloader(self.config) + assert 'train' in id_loader_dict + assert 'val' in id_loader_dict + assert 'val' in ood_loader_dict + + # init network + net = get_network(self.config.network) + + # init evaluator + evaluator = get_evaluator(self.config) + + # sanity check on id val accuracy + print('\nStart evaluation on ID val data...', flush=True) + test_metrics = evaluator.eval_acc(net, id_loader_dict['val']) + print('\nComplete Evaluation, accuracy {:.2f}%'.format( + 100 * test_metrics['acc']), + flush=True) + + # start extracting features + print('\nStart Feature Extraction...', flush=True) + print('\t ID training data...') + evaluator.extract(net, id_loader_dict['train'], 'id_train') + + print('\t ID val data...') + evaluator.extract(net, id_loader_dict['val'], 'id_val') + + print('\t OOD val data...') +
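# (editor's note, hedged) the id_train / id_val / ood_val feature dumps
# produced by this pipeline are presumably what a subsequent OpenGAN training
# stage fits its discriminator on; that consumer is not shown in this file.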
evaluator.extract(net, ood_loader_dict['val'], 'ood_val') + print('\nComplete Feature Extraction!') diff --git a/OpenOOD/openood/pipelines/feat_extract_pipeline.py b/OpenOOD/openood/pipelines/feat_extract_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..bb5f0b872cd8a37b854b0b8780d6e7e59262e25d --- /dev/null +++ b/OpenOOD/openood/pipelines/feat_extract_pipeline.py @@ -0,0 +1,35 @@ +from openood.datasets import get_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.utils import setup_logger + + +class FeatExtractPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + loader_dict = get_dataloader(self.config) + test_loader = loader_dict[self.config.pipeline.extract_target] + + # init network + net = get_network(self.config.network) + + # init evaluator + evaluator = get_evaluator(self.config) + + # start calculating accuracy + print('\nStart evaluation...', flush=True) + test_metrics = evaluator.eval_acc(net, test_loader) + print('\nComplete Evaluation, accuracy {:.2f}%'.format( + 100 * test_metrics['acc']), + flush=True) + + # start extracting features + print('\nStart Feature Extraction...', flush=True) + evaluator.extract(net, test_loader) + print('\nComplete Feature Extraction!') diff --git a/OpenOOD/openood/pipelines/finetune_pipeline.py b/OpenOOD/openood/pipelines/finetune_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f6790189cdb56f3e932a7a43c0e2baef8da720 --- /dev/null +++ b/OpenOOD/openood/pipelines/finetune_pipeline.py @@ -0,0 +1,53 @@ +from openood.datasets import get_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.recorders import get_recorder +from openood.trainers import get_trainer +from openood.utils import setup_logger + + +class FinetunePipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + loader_dict = get_dataloader(self.config) + train_loader, val_loader = loader_dict['train'], loader_dict['val'] + test_loader = loader_dict['test'] + + # init network + net = get_network(self.config.network) + + # init trainer and evaluator + trainer = get_trainer(net, train_loader, self.config) + evaluator = get_evaluator(self.config) + + # init recorder + recorder = get_recorder(self.config) + + # trainer setup + trainer.setup() + print('\n' + u'\u2500' * 70, flush=True) + + print('Start training...', flush=True) + for epoch_idx in range(1, self.config.optimizer.num_epochs + 1): + # train and eval the model + net, train_metrics = trainer.train_epoch(epoch_idx) + val_metrics = evaluator.eval_acc(net, val_loader, None, epoch_idx) + # save model and report the result + recorder.save_model(net, val_metrics) + recorder.report(train_metrics, val_metrics) + recorder.summary() + print(u'\u2500' * 70, flush=True) + + # evaluate on test set + print('Start testing...', flush=True) + test_metrics = evaluator.eval_acc(net, test_loader) + print('\nComplete Evaluation, accuracy {:.2f}'.format( + 100.0 * test_metrics['acc']), + flush=True) + print('Completed!', flush=True) diff --git a/OpenOOD/openood/pipelines/test_acc_pipeline.py b/OpenOOD/openood/pipelines/test_acc_pipeline.py new file mode 100644 index 
0000000000000000000000000000000000000000..6c73f4ad2865ba23731ef9194c32967ca4871b4e --- /dev/null +++ b/OpenOOD/openood/pipelines/test_acc_pipeline.py @@ -0,0 +1,30 @@ +from openood.datasets import get_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.utils import setup_logger + + +class TestAccPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + loader_dict = get_dataloader(self.config) + test_loader = loader_dict['test'] + + # init network + net = get_network(self.config.network) + + # init evaluator + evaluator = get_evaluator(self.config) + + # start calculating accuracy + print('\nStart evaluation...', flush=True) + test_metrics = evaluator.eval_acc(net, test_loader) + print('\nComplete Evaluation, accuracy {:.2f}%'.format( + 100 * test_metrics['acc']), + flush=True) diff --git a/OpenOOD/openood/pipelines/test_ad_pipeline.py b/OpenOOD/openood/pipelines/test_ad_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..aa2410431613c993467b64103db28bca0819e8ff --- /dev/null +++ b/OpenOOD/openood/pipelines/test_ad_pipeline.py @@ -0,0 +1,33 @@ +from openood.datasets import get_dataloader, get_ood_dataloader +from openood.evaluators.utils import get_evaluator +from openood.networks.utils import get_network +from openood.postprocessors import get_postprocessor +from openood.utils import setup_logger + + +class TestAdPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + id_loader_dict = get_dataloader(self.config) + ood_loader_dict = get_ood_dataloader(self.config) + + # init network + net = get_network(self.config.network) + + # init evaluator + evaluator = get_evaluator(self.config) + + postprocessor = get_postprocessor(self.config) + # setup for distance-based methods + postprocessor.setup(net, id_loader_dict, ood_loader_dict) + + print('Start testing...', flush=True) + test_metrics = evaluator.eval_ood(net, id_loader_dict, ood_loader_dict, + postprocessor) + evaluator.report(test_metrics) diff --git a/OpenOOD/openood/pipelines/test_ood_pipeline.py b/OpenOOD/openood/pipelines/test_ood_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2e413295136b4999de200e9039e310914406c0 --- /dev/null +++ b/OpenOOD/openood/pipelines/test_ood_pipeline.py @@ -0,0 +1,63 @@ +import time + +from openood.datasets import get_dataloader, get_ood_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.postprocessors import get_postprocessor +from openood.utils import setup_logger + + +class TestOODPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + id_loader_dict = get_dataloader(self.config) + ood_loader_dict = get_ood_dataloader(self.config) + + # init network + net = get_network(self.config.network) + + # init ood evaluator + evaluator = get_evaluator(self.config) + + # init ood postprocessor + postprocessor = get_postprocessor(self.config) + # setup for distance-based methods + postprocessor.setup(net, id_loader_dict, ood_loader_dict) + print('\n', flush=True) + print(u'\u2500' * 70, flush=True) + + # 
start calculating accuracy + print('\nStart evaluation...', flush=True) + if self.config.evaluator.ood_scheme == 'fsood': + acc_metrics = evaluator.eval_acc( + net, + id_loader_dict['test'], + postprocessor, + fsood=True, + csid_data_loaders=ood_loader_dict['csid']) + else: + acc_metrics = evaluator.eval_acc(net, id_loader_dict['test'], + postprocessor) + print('\nAccuracy {:.2f}%'.format(100 * acc_metrics['acc']), + flush=True) + print(u'\u2500' * 70, flush=True) + + # start evaluating ood detection methods + timer = time.time() + if self.config.evaluator.ood_scheme == 'fsood': + evaluator.eval_ood(net, + id_loader_dict, + ood_loader_dict, + postprocessor, + fsood=True) + else: + evaluator.eval_ood(net, id_loader_dict, ood_loader_dict, + postprocessor) + print('Time used for eval_ood: {:.0f}s'.format(time.time() - timer)) + print('Completed!', flush=True)
diff --git a/OpenOOD/openood/pipelines/test_ood_pipeline_aps.py b/OpenOOD/openood/pipelines/test_ood_pipeline_aps.py new file mode 100644 index 0000000000000000000000000000000000000000..db7040b6c7f0ac718622c511c673fdacc7fc61c4 --- /dev/null +++ b/OpenOOD/openood/pipelines/test_ood_pipeline_aps.py @@ -0,0 +1,43 @@ +from openood.datasets import get_dataloader, get_ood_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.postprocessors import get_postprocessor +from openood.utils import setup_logger + + +class TestOODPipelineAPS: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + id_loader_dict = get_dataloader(self.config) + ood_loader_dict = get_ood_dataloader(self.config) + + # init network + net = get_network(self.config.network) + + # init ood evaluator + evaluator = get_evaluator(self.config) + + # init ood postprocessor + postprocessor = get_postprocessor(self.config) + # setup for distance-based methods + postprocessor.setup(net, id_loader_dict, ood_loader_dict) + print('\n', flush=True) + print(u'\u2500' * 70, flush=True) + + # start calculating accuracy + print('\nStart evaluation...', flush=True) + acc_metrics = evaluator.eval_acc(net, id_loader_dict['test'], + postprocessor) + print('\nAccuracy {:.2f}%'.format(100 * acc_metrics['acc']), + flush=True) + print(u'\u2500' * 70, flush=True) + + # start evaluating ood detection methods + evaluator.eval_ood(net, id_loader_dict, ood_loader_dict, postprocessor) + print('Completed!', flush=True)
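These test pipelines never score samples themselves: they call `postprocessor.setup(net, id_loader_dict, ood_loader_dict)` once, then let `eval_acc`/`eval_ood` invoke the postprocessor batch by batch. The contract, visible in `BronzeNet2Postprocessor` further down in this diff, is `postprocess(net, data) -> (pred, conf)`. A minimal sketch of a scorer satisfying that contract (the class name `MSPSketch` and the softmax scoring rule are illustrative, not part of this diff):

```python
import torch
import torch.nn as nn

from openood.postprocessors import BasePostprocessor


class MSPSketch(BasePostprocessor):
    """Illustrative maximum-softmax-probability scorer."""

    @torch.no_grad()
    def postprocess(self, net: nn.Module, data: torch.Tensor):
        logits = net(data)
        probs = torch.softmax(logits, dim=1)
        conf, pred = torch.max(probs, dim=1)  # higher conf => more ID-like
        return pred, conf
```

Anything with this shape can be returned by `get_postprocessor` and dropped into the loops above; distance-based methods additionally override `setup` to fit statistics on the ID training split, which is why every pipeline calls `setup` before evaluating.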
diff --git a/OpenOOD/openood/pipelines/train_ad_pipeline.py b/OpenOOD/openood/pipelines/train_ad_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..2ff2b1d7ab77367e323a917d6e660bfa53807170 --- /dev/null +++ b/OpenOOD/openood/pipelines/train_ad_pipeline.py @@ -0,0 +1,58 @@ +from openood.datasets import get_dataloader, get_ood_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.postprocessors import get_postprocessor +from openood.preprocessors.utils import get_preprocessor +from openood.recorders import get_recorder +from openood.trainers import get_trainer +from openood.utils import setup_logger + + +class TrainAdPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + id_loader_dict = get_dataloader(self.config) + ood_loader_dict = get_ood_dataloader(self.config) + train_loader = id_loader_dict['train'] + + # init network + net = get_network(self.config.network) + + # init trainer and evaluator + trainer = get_trainer(net, train_loader, self.config) + evaluator = get_evaluator(self.config) + + postprocessor = get_postprocessor(self.config) + # setup for distance-based methods + postprocessor.setup(net, id_loader_dict, ood_loader_dict) + + # init recorder + recorder = get_recorder(self.config) + + print('Start training...', flush=True) + for epoch_idx in range(1, self.config.optimizer.num_epochs + 1): + # train the model + net, train_metrics = trainer.train_epoch(epoch_idx) + test_metrics = evaluator.eval_ood(net, + id_loader_dict, + ood_loader_dict, + postprocessor=postprocessor, + epoch_idx=epoch_idx) + # save model and report the result + recorder.save_model(net, test_metrics) + recorder.report(train_metrics, test_metrics) + recorder.summary() + + # evaluate on test set + print('Start testing...', flush=True) + test_metrics = evaluator.eval_ood(net, + id_loader_dict, + ood_loader_dict, + postprocessor=postprocessor) + evaluator.report(test_metrics)
diff --git a/OpenOOD/openood/pipelines/train_aux_pipeline.py b/OpenOOD/openood/pipelines/train_aux_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..345ab9df9014c91d3a973aea4008b88c208eec84 --- /dev/null +++ b/OpenOOD/openood/pipelines/train_aux_pipeline.py @@ -0,0 +1,54 @@ +from openood.datasets import get_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.recorders import get_recorder +from openood.trainers import get_trainer +from openood.utils import setup_logger + + +class TrainARPLGANPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + loader_dict = get_dataloader(self.config) + train_loader, val_loader = loader_dict['train'], loader_dict['val'] + test_loader = loader_dict['test'] + + # init network + net = get_network(self.config.network) + + # init trainer and evaluator + trainer = get_trainer(net, train_loader, self.config) + self.config.trainer.name = 'arpl' + trainer_aux = get_trainer(net, train_loader, self.config) + evaluator = get_evaluator(self.config) + + # init recorder + recorder = get_recorder(self.config) + + print('Start training...', flush=True) + for epoch_idx in range(1, self.config.optimizer.num_epochs + 1): + # train and eval the model + net, train_metrics = trainer.train_epoch(epoch_idx) + net, train_aux_metrics = trainer_aux.train_epoch(epoch_idx) + train_metrics['loss'] = train_aux_metrics['loss'] + val_metrics = evaluator.eval_acc(net, val_loader, None, epoch_idx) + trainer.scheduler.step() + # save model and report the result + recorder.save_model(net, val_metrics) + recorder.report(train_metrics, val_metrics) + recorder.summary() + print(u'\u2500' * 70, flush=True) + + # evaluate on test set + print('Start testing...', flush=True) + test_metrics = evaluator.eval_acc(net, test_loader) + print('\nComplete Evaluation, Last accuracy {:.2f}'.format( + 100.0 * test_metrics['acc']), + flush=True) + print('Completed!', flush=True)
diff --git a/OpenOOD/openood/pipelines/train_ddt_pipeline.py b/OpenOOD/openood/pipelines/train_ddt_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..6e4372c0b57512aa6a71cb069b31953d2d3ac706 --- /dev/null +++ b/OpenOOD/openood/pipelines/train_ddt_pipeline.py @@ -0,0
+1,58 @@ +import openood.utils.comm as comm +from openood.datasets import get_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.recorders import get_recorder +from openood.trainers import get_trainer +from openood.utils import setup_logger + + +class TrainDDTPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + loader_dict = get_dataloader(self.config) + train_loader, val_loader = loader_dict['train'], loader_dict['val'] + test_loader = loader_dict['test'] + + # init network + net = get_network(self.config.network) + + # init trainer and evaluator + trainer = get_trainer(net, train_loader, self.config) + evaluator = get_evaluator(self.config) + + if comm.is_main_process(): + # init recorder + recorder = get_recorder(self.config) + + print('Start training...', flush=True) + for epoch_idx in range(1, self.config.optimizer.num_epochs + 1): + # train and eval the model + net, train_metrics = trainer.train_epoch(epoch_idx) + val_metrics = evaluator.eval_acc(net, val_loader, None, epoch_idx) + comm.synchronize() + if comm.is_main_process(): + # save model and report the result + recorder.save_model(net, val_metrics) + recorder.report(train_metrics, val_metrics) + + if comm.is_main_process(): + recorder.summary() + print(u'\u2500' * 70, flush=True) + + # evaluate on test set + print('Start testing...', flush=True) + + test_metrics = evaluator.eval_acc(net, test_loader) + + if comm.is_main_process(): + print('\nComplete Evaluation, Last accuracy {:.2f}'.format( + 100.0 * test_metrics['acc']), + flush=True) + print('Completed!', flush=True)
diff --git a/OpenOOD/openood/pipelines/train_oe_pipeline.py b/OpenOOD/openood/pipelines/train_oe_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..a0092540f9f61924d7964f01c574da26b82f1b5d --- /dev/null +++ b/OpenOOD/openood/pipelines/train_oe_pipeline.py @@ -0,0 +1,73 @@ +import numpy as np +import torch + +import openood.utils.comm as comm +from openood.datasets import get_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.recorders import get_recorder +from openood.trainers import get_trainer +from openood.utils import setup_logger + + +class TrainOEPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # set random seed + torch.manual_seed(self.config.seed) + np.random.seed(self.config.seed) + + # get dataloader + loader_dict = get_dataloader(self.config) + train_loader, val_loader = loader_dict['train'], loader_dict['val'] + train_oe_loader = loader_dict['oe'] + test_loader = loader_dict['test'] + + # init network + net = get_network(self.config.network) + if self.config.num_gpus * self.config.num_machines > 1: + net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net) + + # init trainer and evaluator + trainer = get_trainer(net, [train_loader, train_oe_loader], None, + self.config) + evaluator = get_evaluator(self.config) + + if comm.is_main_process(): + # init recorder + recorder = get_recorder(self.config) + print('Start training...', flush=True) + + for epoch_idx in range(1, self.config.optimizer.num_epochs + 1): + if isinstance(train_loader.sampler, + torch.utils.data.distributed.DistributedSampler): +
train_loader.sampler.set_epoch(epoch_idx - 1) + + # train and eval the model + net, train_metrics = trainer.train_epoch(epoch_idx) + val_metrics = evaluator.eval_acc(net, val_loader, None, epoch_idx) + comm.synchronize() + if comm.is_main_process(): + # save model and report the result + recorder.save_model(net, val_metrics) + recorder.report(train_metrics, val_metrics) + + if comm.is_main_process(): + recorder.summary() + print(u'\u2500' * 70, flush=True) + + # evaluate on test set + print('Start testing...', flush=True) + + test_metrics = evaluator.eval_acc(net, test_loader) + + if comm.is_main_process(): + print('\nComplete Evaluation, Last accuracy {:.2f}'.format( + 100.0 * test_metrics['acc']), + flush=True) + print('Completed!', flush=True) diff --git a/OpenOOD/openood/pipelines/train_only_pipeline.py b/OpenOOD/openood/pipelines/train_only_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..01bad05f2f207d9b6eea408090821160ed9ed239 --- /dev/null +++ b/OpenOOD/openood/pipelines/train_only_pipeline.py @@ -0,0 +1,36 @@ +from openood.datasets import get_feature_dataloader +from openood.networks import get_network +from openood.recorders import get_recorder +from openood.trainers import get_trainer +from openood.utils import setup_logger + + +class TrainOpenGanPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # get dataloader + feat_loader = get_feature_dataloader(self.config.dataset) + + # init network + net = get_network(self.config.network) + + # init trainer + trainer = get_trainer(net, feat_loader, self.config) + + # init recorder + recorder = get_recorder(self.config) + + print('Start training...', flush=True) + for epoch_idx in range(1, self.config.optimizer.num_epochs + 1): + # train the model + net, train_metrics = trainer.train_epoch(epoch_idx) + recorder.save_model(net, train_metrics) + recorder.report(train_metrics) + recorder.summary() + + print('Completed!', flush=True) diff --git a/OpenOOD/openood/pipelines/train_opengan_pipeline.py b/OpenOOD/openood/pipelines/train_opengan_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..9087b15f814b82dbc84b53a703edcb5406401eac --- /dev/null +++ b/OpenOOD/openood/pipelines/train_opengan_pipeline.py @@ -0,0 +1,64 @@ +import numpy as np +import torch + +from openood.datasets import get_feature_opengan_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.postprocessors import get_postprocessor +from openood.recorders import get_recorder +from openood.trainers import get_trainer +from openood.utils import setup_logger + + +class TrainOpenGanPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # set random seed + torch.manual_seed(self.config.seed) + np.random.seed(self.config.seed) + + # get dataloader + dataloaders = get_feature_opengan_dataloader(self.config.dataset) + id_loaders = { + 'train': dataloaders['id_train'], + 'val': dataloaders['id_val'] + } # just for consistency with evaluator + ood_loaders = {'val': dataloaders['ood_val']} + + # init network + net = get_network(self.config.network) + if self.config.num_gpus * self.config.num_machines > 1: + net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net) + + # init trainer + trainer = get_trainer(net, 
dataloaders['id_train'], + dataloaders['id_val'], self.config) + evaluator = get_evaluator(self.config) + + # init recorder + recorder = get_recorder(self.config) + + # init ood postprocessor + postprocessor = get_postprocessor(self.config) + + print('Start training...', flush=True) + for epoch_idx in range(1, self.config.optimizer.num_epochs + 1): + if isinstance(dataloaders['id_train'].sampler, + torch.utils.data.distributed.DistributedSampler): + dataloaders['id_train'].sampler.set_epoch(epoch_idx - 1) + + # train the model + net, train_metrics = trainer.train_epoch(epoch_idx) + val_metrics = evaluator.eval_ood_val(net, id_loaders, ood_loaders, + postprocessor) + val_metrics['epoch_idx'] = train_metrics['epoch_idx'] + recorder.save_model(net, val_metrics) + recorder.report(train_metrics, val_metrics) + recorder.summary() + + print('Completed!', flush=True) diff --git a/OpenOOD/openood/pipelines/train_pipeline.py b/OpenOOD/openood/pipelines/train_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..dc1447dc526d64b5f946404dfe884e257dced9be --- /dev/null +++ b/OpenOOD/openood/pipelines/train_pipeline.py @@ -0,0 +1,87 @@ +import numpy as np +import torch + +import openood.utils.comm as comm +from openood.datasets import get_dataloader +from openood.evaluators import get_evaluator +from openood.networks import get_network +from openood.recorders import get_recorder +from openood.trainers import get_trainer +from openood.utils import setup_logger + + +class TrainPipeline: + def __init__(self, config) -> None: + self.config = config + + def run(self): + # generate output directory and save the full config file + setup_logger(self.config) + + # set random seed + torch.manual_seed(self.config.seed) + np.random.seed(self.config.seed) + + # get dataloader + loader_dict = get_dataloader(self.config) + train_loader, val_loader = loader_dict['train'], loader_dict['val'] + test_loader = loader_dict['test'] + + # init network + net = get_network(self.config.network) + if self.config.num_gpus * self.config.num_machines > 1: + net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net) + + # init trainer and evaluator + trainer = get_trainer(net, train_loader, val_loader, self.config) + evaluator = get_evaluator(self.config) + + if comm.is_main_process(): + # init recorder + recorder = get_recorder(self.config) + print('Start training...', flush=True) + + for epoch_idx in range(1, self.config.optimizer.num_epochs + 1): + if isinstance(train_loader.sampler, + torch.utils.data.distributed.DistributedSampler): + train_loader.sampler.set_epoch(epoch_idx - 1) + + # train and eval the model + if self.config.trainer.name == 'mos': + net, train_metrics, num_groups, group_slices = \ + trainer.train_epoch(epoch_idx) + val_metrics = evaluator.eval_acc(net, + val_loader, + train_loader, + epoch_idx, + num_groups=num_groups, + group_slices=group_slices) + elif self.config.trainer.name in ['cider', 'npos']: + net, train_metrics = trainer.train_epoch(epoch_idx) + # cider and npos only trains the backbone + # cannot evaluate ID acc without training the fc layer + val_metrics = train_metrics + else: + net, train_metrics = trainer.train_epoch(epoch_idx) + val_metrics = evaluator.eval_acc(net, val_loader, None, + epoch_idx) + comm.synchronize() + if comm.is_main_process(): + # save model and report the result + recorder.save_model(net, val_metrics) + recorder.report(train_metrics, val_metrics) + + if comm.is_main_process(): + recorder.summary() + print(u'\u2500' * 70, flush=True) + + # evaluate on 
test set + print('Start testing...', flush=True) + + test_metrics = evaluator.eval_acc(net, test_loader) + + if comm.is_main_process(): + print('\nComplete Evaluation, Last accuracy {:.2f}'.format( + 100.0 * test_metrics['acc']), + flush=True) + print('Completed!', flush=True)
diff --git a/OpenOOD/openood/pipelines/utils.py b/OpenOOD/openood/pipelines/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f8c05ffa020cc0ffc8d19986be2a05b03eb39088 --- /dev/null +++ b/OpenOOD/openood/pipelines/utils.py @@ -0,0 +1,34 @@ +from openood.utils import Config + +from .feat_extract_pipeline import FeatExtractPipeline +from .feat_extract_opengan_pipeline import FeatExtractOpenGANPipeline +from .finetune_pipeline import FinetunePipeline +from .test_acc_pipeline import TestAccPipeline +from .test_ad_pipeline import TestAdPipeline +from .test_ood_pipeline import TestOODPipeline +from .train_ad_pipeline import TrainAdPipeline +from .train_aux_pipeline import TrainARPLGANPipeline +from .train_oe_pipeline import TrainOEPipeline +# from .train_only_pipeline import TrainOpenGanPipeline +from .train_opengan_pipeline import TrainOpenGanPipeline +from .train_pipeline import TrainPipeline +from .test_ood_pipeline_aps import TestOODPipelineAPS + + +def get_pipeline(config: Config): + pipelines = { + 'train': TrainPipeline, + 'finetune': FinetunePipeline, + 'test_acc': TestAccPipeline, + 'feat_extract': FeatExtractPipeline, + 'feat_extract_opengan': FeatExtractOpenGANPipeline, + 'test_ood': TestOODPipeline, + 'test_ad': TestAdPipeline, + 'train_ad': TrainAdPipeline, + 'train_oe': TrainOEPipeline, + 'train_opengan': TrainOpenGanPipeline, + 'train_arplgan': TrainARPLGANPipeline, + 'test_ood_aps': TestOODPipelineAPS + } + + return pipelines[config.pipeline.name](config)
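`get_pipeline` is the single dispatch point for the whole framework: an entry script builds a `Config`, and `config.pipeline.name` picks the class from the dict above. A minimal driver sketch (the YAML path is hypothetical, and the assumption that `Config` accepts a config-file path follows OpenOOD's usual entry scripts, which are not part of this diff):

```python
from openood.pipelines import get_pipeline
from openood.utils import Config

# Hypothetical config file defining pipeline.name ('test_ood' here) plus
# the dataset/network/evaluator/postprocessor sections that pipeline reads.
config = Config('configs/pipelines/test/test_ood.yml')

pipeline = get_pipeline(config)  # -> TestOODPipeline(config)
pipeline.run()
```

Because the registry is a plain dict, an unrecognized `pipeline.name` fails fast with a `KeyError` instead of silently falling back to a default.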
diff --git a/OpenOOD/openood/postprocessors/BronzeNet2_postprocessor.py b/OpenOOD/openood/postprocessors/BronzeNet2_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..f518b15ae41bc74cf06b79fba374612baaa49084 --- /dev/null +++ b/OpenOOD/openood/postprocessors/BronzeNet2_postprocessor.py @@ -0,0 +1,62 @@ +from typing import Any + +import faiss +import numpy as np +import torch +import torch.nn as nn +from tqdm import tqdm + +from .base_postprocessor import BasePostprocessor + +normalizer = lambda x: x / (np.linalg.norm(x, axis=-1, keepdims=True) + 1e-10) + + +class BronzeNet2Postprocessor(BasePostprocessor): + def __init__(self, config): + super(BronzeNet2Postprocessor, self).__init__(config) + self.args = self.config.postprocessor.postprocessor_args + self.K = self.args.K + self.activation_log = None + self.args_dict = self.config.postprocessor.postprocessor_sweep + self.setup_flag = False + + def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict): + if not self.setup_flag: + activation_log = [] + net.eval() + with torch.no_grad(): + for batch in tqdm(id_loader_dict['train'], + desc='Setup: ', + position=0, + leave=True): + data = batch['data'].cuda() + data = data.float() + + _, feature = net(data, return_feature=True) + activation_log.append( + normalizer(feature.data.cpu().numpy())) + + self.activation_log = np.concatenate(activation_log, axis=0) + self.index = faiss.IndexFlatL2(feature.shape[1]) + self.index.add(self.activation_log) + self.setup_flag = True + else: + pass + + @torch.no_grad() + def postprocess(self, net: nn.Module, data: Any): + output, feature = net(data, return_feature=True) + feature_normed = normalizer(feature.data.cpu().numpy()) + D, _ = self.index.search( + feature_normed, + self.K, + ) + kth_dist = -D[:, -1] + _, pred = torch.max(torch.softmax(output, dim=1), dim=1) + return pred, torch.from_numpy(kth_dist) + + def set_hyperparam(self, hyperparam: list): + self.K = hyperparam[0] + + def get_hyperparam(self): + return self.K
diff --git a/OpenOOD/openood/postprocessors/__init__.py b/OpenOOD/openood/postprocessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c2eb83676254fca508c3b64896588dd07dcd28ca --- /dev/null +++ b/OpenOOD/openood/postprocessors/__init__.py @@ -0,0 +1,44 @@ +from .ash_postprocessor import ASHPostprocessor +from .base_postprocessor import BasePostprocessor +from .cider_postprocessor import CIDERPostprocessor +from .conf_branch_postprocessor import ConfBranchPostprocessor +from .cutpaste_postprocessor import CutPastePostprocessor +from .dice_postprocessor import DICEPostprocessor +from .draem_postprocessor import DRAEMPostprocessor +from .dropout_postprocessor import DropoutPostProcessor +from .dsvdd_postprocessor import DSVDDPostprocessor +from .ebo_postprocessor import EBOPostprocessor +from .ensemble_postprocessor import EnsemblePostprocessor +from .gmm_postprocessor import GMMPostprocessor +from .godin_postprocessor import GodinPostprocessor +from .gradnorm_postprocessor import GradNormPostprocessor +from .gram_postprocessor import GRAMPostprocessor +from .kl_matching_postprocessor import KLMatchingPostprocessor +from .knn_postprocessor import KNNPostprocessor +from .maxlogit_postprocessor import MaxLogitPostprocessor +from .mcd_postprocessor import MCDPostprocessor +from .mcm_postprocessor import MCMPostprocessor +from .mds_postprocessor import MDSPostprocessor +from .mds_ensemble_postprocessor import MDSEnsemblePostprocessor +from .mos_postprocessor import MOSPostprocessor +from .npos_postprocessor import NPOSPostprocessor +from .odin_postprocessor import ODINPostprocessor +from .opengan_postprocessor import OpenGanPostprocessor +from .openmax_postprocessor import OpenMax +from .patchcore_postprocessor import PatchcorePostprocessor +from .rd4ad_postprocessor import Rd4adPostprocessor +from .react_postprocessor import ReactPostprocessor +from .rmds_postprocessor import RMDSPostprocessor +from .residual_postprocessor import ResidualPostprocessor +from .scale_postprocessor import ScalePostprocessor +from .ssd_postprocessor import SSDPostprocessor +from .she_postprocessor import SHEPostprocessor +from .temp_scaling_postprocessor import TemperatureScalingPostprocessor +from .utils import get_postprocessor +from .vim_postprocessor import VIMPostprocessor +from .rotpred_postprocessor import RotPredPostprocessor +from .rankfeat_postprocessor import RankFeatPostprocessor +from .gen_postprocessor import GENPostprocessor +from .nnguide_postprocessor import NNGuidePostprocessor +from .relation_postprocessor import RelationPostprocessor +from .BronzeNet2_postprocessor import BronzeNet2Postprocessor
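`BronzeNet2Postprocessor` is a K-nearest-neighbor detector: it L2-normalizes penultimate-layer features, indexes the ID training features with a flat faiss index, and scores a test sample by the negated (squared) distance to its K-th nearest training neighbor, so tighter neighborhoods mean higher confidence. A self-contained sketch of just that scoring rule on random features (the dimensions, sample counts, and `K` are illustrative, not taken from any config in this diff):

```python
import faiss
import numpy as np

rng = np.random.default_rng(0)
train_feats = rng.normal(size=(1000, 128)).astype(np.float32)
test_feats = rng.normal(size=(8, 128)).astype(np.float32)


def l2_normalize(x, eps=1e-10):
    # epsilon sits inside the denominator so a zero-norm feature
    # cannot trigger a division by zero (as in the normalizer above)
    return x / (np.linalg.norm(x, axis=-1, keepdims=True) + eps)


K = 50
index = faiss.IndexFlatL2(train_feats.shape[1])
index.add(l2_normalize(train_feats))

# D holds squared L2 distances to the K nearest neighbors in ascending
# order, so D[:, -1] is the distance to the K-th neighbor.
D, _ = index.search(l2_normalize(test_feats), K)
conf = -D[:, -1]  # larger (less negative) => more in-distribution
print(conf)
```

`set_hyperparam`/`get_hyperparam` expose `K` to the automatic parameter search used by the `test_ood_aps` pipeline, which sweeps the values in `postprocessor_sweep` and keeps the best-performing one.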
diff --git a/OpenOOD/openood/postprocessors/__pycache__/temp_scaling_postprocessor.cpython-37.pyc b/OpenOOD/openood/postprocessors/__pycache__/temp_scaling_postprocessor.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0463a386a6c61d7947bf17eb21095994907eb8c Binary files /dev/null and
diff --git a/OpenOOD/openood/postprocessors/ash_postprocessor.py b/OpenOOD/openood/postprocessors/ash_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..33c651855aa584a53804848c74edf8426d016e9f
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/ash_postprocessor.py
@@ -0,0 +1,29 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .base_postprocessor import BasePostprocessor
+
+
+class ASHPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(ASHPostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.percentile = self.args.percentile
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output = net.forward_threshold(data, self.percentile)
+        _, pred = torch.max(output, dim=1)
+        energyconf = torch.logsumexp(output.data.cpu(), dim=1)
+        return pred, energyconf
+
+    def set_hyperparam(self, hyperparam: list):
+        self.percentile = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.percentile
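Context for the file above: `forward_threshold` is a hook on the network side and is not defined in this diff. A minimal, hypothetical sketch of the ASH-P-style pruning it is assumed to perform (names here are illustrative, not the repository's API):

    import torch

    def ash_prune(feature: torch.Tensor, percentile: float) -> torch.Tensor:
        # zero out the weakest `percentile` percent of activations per sample
        n = feature.shape[1]
        k = max(1, int(n * (100 - percentile) / 100))
        vals, idx = torch.topk(feature, k, dim=1)
        pruned = torch.zeros_like(feature)
        pruned.scatter_(1, idx, vals)
        return pruned

The postprocessor then scores with the energy of the logits computed from the pruned representation, e.g. `torch.logsumexp(logits, dim=1)`.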
diff --git a/OpenOOD/openood/postprocessors/base_postprocessor.py b/OpenOOD/openood/postprocessors/base_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c6659acc1793cba8bad2b06e90b03c150b88f62
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/base_postprocessor.py
@@ -0,0 +1,45 @@
+from typing import Any
+from tqdm import tqdm
+
+import torch
+import torch.nn as nn
+from torch.utils.data import DataLoader
+
+import openood.utils.comm as comm
+
+
+class BasePostprocessor:
+    def __init__(self, config):
+        self.config = config
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output = net(data)
+        score = torch.softmax(output, dim=1)
+        conf, pred = torch.max(score, dim=1)
+        return pred, conf
+
+    def inference(self,
+                  net: nn.Module,
+                  data_loader: DataLoader,
+                  progress: bool = True):
+        pred_list, conf_list, label_list = [], [], []
+        for batch in tqdm(data_loader,
+                          disable=not progress or not comm.is_main_process()):
+            data = batch['data'].cuda()
+            label = batch['label'].cuda()
+            pred, conf = self.postprocess(net, data)
+
+            pred_list.append(pred.cpu())
+            conf_list.append(conf.cpu())
+            label_list.append(label.cpu())
+
+        # convert values into numpy array
+        pred_list = torch.cat(pred_list).numpy().astype(int)
+        conf_list = torch.cat(conf_list).numpy()
+        label_list = torch.cat(label_list).numpy().astype(int)
+
+        return pred_list, conf_list, label_list
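The base postprocessor above is plain maximum softmax probability (MSP): the returned confidence is the winning class probability, so higher means more in-distribution. A toy check of the (pred, conf) contract:

    import torch

    logits = torch.tensor([[2.0, 0.5, -1.0],
                           [0.1, 0.2, 0.15]])
    score = torch.softmax(logits, dim=1)
    conf, pred = torch.max(score, dim=1)
    # conf holds the MSP scores, pred the predicted class indices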
diff --git a/OpenOOD/openood/postprocessors/cider_postprocessor.py b/OpenOOD/openood/postprocessors/cider_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..d96bba63f157bcbc3dc58bce46ba017a4d8278d4
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/cider_postprocessor.py
@@ -0,0 +1,59 @@
+from typing import Any
+
+import faiss
+import numpy as np
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+
+
+class CIDERPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(CIDERPostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.K = self.args.K
+        self.activation_log = None
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            activation_log = []
+            net.eval()
+            with torch.no_grad():
+                for batch in tqdm(id_loader_dict['train'],
+                                  desc='Setup: ',
+                                  position=0,
+                                  leave=True):
+                    data = batch['data'].cuda()
+
+                    feature = net.intermediate_forward(data)
+                    activation_log.append(feature.data.cpu().numpy())
+
+            self.activation_log = np.concatenate(activation_log, axis=0)
+            self.index = faiss.IndexFlatL2(feature.shape[1])
+            self.index.add(self.activation_log)
+            self.setup_flag = True
+        else:
+            pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        feature = net.intermediate_forward(data)
+        D, _ = self.index.search(
+            feature.cpu().numpy(),  # feature is already normalized within net
+            self.K,
+        )
+        kth_dist = -D[:, -1]
+        # put dummy prediction here
+        # as cider only trains the feature extractor
+        pred = torch.zeros(len(kth_dist))
+        return pred, torch.from_numpy(kth_dist)
+
+    def set_hyperparam(self, hyperparam: list):
+        self.K = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.K
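CIDER scores a sample by the negative distance to its K-th nearest training feature; with L2-normalized features, squared Euclidean distance is a monotone function of cosine similarity. A minimal faiss sketch with synthetic data (shapes and K are made up):

    import faiss
    import numpy as np

    feats = np.random.randn(1000, 128).astype(np.float32)
    feats /= np.linalg.norm(feats, axis=1, keepdims=True)  # unit norm
    index = faiss.IndexFlatL2(feats.shape[1])
    index.add(feats)
    D, _ = index.search(feats[:5], 50)  # ascending squared distances
    score = -D[:, -1]                   # negative K-th NN distance, K = 50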
diff --git a/OpenOOD/openood/postprocessors/conf_branch_postprocessor.py b/OpenOOD/openood/postprocessors/conf_branch_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..88868b4c18199bc8341694731cc79e4074d033b4
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/conf_branch_postprocessor.py
@@ -0,0 +1,19 @@
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+from .base_postprocessor import BasePostprocessor
+
+
+class ConfBranchPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(ConfBranchPostprocessor, self).__init__(config)
+        self.config = config
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output, conf = net(data, return_confidence=True)
+        conf = torch.sigmoid(conf)
+        _, pred = torch.max(output, dim=1)
+        return pred, conf
diff --git a/OpenOOD/openood/postprocessors/cutpaste_postprocessor.py b/OpenOOD/openood/postprocessors/cutpaste_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..85a8ee88f3feb1e8d5453cf9ad57dd1f123529df
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/cutpaste_postprocessor.py
@@ -0,0 +1,116 @@
+from __future__ import division, print_function
+
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+from sklearn.covariance import LedoitWolf as LW
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+
+class CutPastePostprocessor:
+    def __init__(self, config):
+        self.config = config
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        # get train embeds
+        train_loader = id_loader_dict['train']
+        train_embed = []
+        train_dataiter = iter(train_loader)
+        with torch.no_grad():
+            for train_step in tqdm(range(1,
+                                         len(train_dataiter) + 1),
+                                   desc='Train embeds'):
+                batch = next(train_dataiter)
+                data = torch.cat(batch['data'], 0)
+                if (np.array(data).shape[0] == 4):
+                    data = data.numpy().tolist()
+                    data = data[0:len(data) // 2]
+                    data = torch.Tensor(data)
+                data = data.cuda()
+                embed, logit = net(data)
+                train_embed.append(embed.cuda())
+        train_embeds = torch.cat(train_embed)
+        self.train_embeds = torch.nn.functional.normalize(train_embeds,
+                                                          p=2,
+                                                          dim=1)
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        # get embeds
+        embeds = []
+        embed, output = net(data)
+        embeds.append(embed.cuda())
+        embeds = torch.cat(embeds)
+        embeds = torch.nn.functional.normalize(embeds, p=2, dim=1)
+        score = torch.softmax(output, dim=1)
+        conf, pred = torch.max(score, dim=1)
+        # compute distances
+        density = GaussianDensityTorch()
+        density.fit(self.train_embeds)
+        distances = density.predict(embeds)
+        distances = 200 - distances
+        return pred, distances
+
+    def inference(self, net: nn.Module, data_loader: DataLoader):
+        pred_list, conf_list, label_list = [], [], []
+        for batch in data_loader:
+            data = torch.cat(batch['data'], 0)
+            data = data.cuda()
+            # label = torch.arange(2)
+            label = torch.tensor([0, -1])
+            label = label.repeat_interleave(len(batch['data'][0])).cuda()
+            pred, conf = self.postprocess(net, data)
+            for idx in range(len(data)):
+                pred_list.append(pred[idx].cpu().tolist())
+                conf_list.append(conf[idx].cpu().tolist())
+                label_list.append(label[idx].cpu().tolist())
+
+        # convert values into numpy array
+        pred_list = np.array(pred_list, dtype=int)
+        conf_list = np.array(conf_list)
+        label_list = np.array(label_list, dtype=int)
+
+        return pred_list, conf_list, label_list
+
+
+class Density(object):
+    def fit(self, embeddings):
+        raise NotImplementedError
+
+    def predict(self, embeddings):
+        raise NotImplementedError
+
+
+class GaussianDensityTorch(Density):
+    def fit(self, embeddings):
+        self.mean = torch.mean(embeddings, axis=0)
+        self.inv_cov = torch.Tensor(LW().fit(embeddings.cpu()).precision_,
+                                    device='cpu')
+
+    def predict(self, embeddings):
+        distances = self.mahalanobis_distance(embeddings, self.mean,
+                                              self.inv_cov)
+        return distances
+
+    @staticmethod
+    def mahalanobis_distance(values: torch.Tensor, mean: torch.Tensor,
+                             inv_covariance: torch.Tensor) -> torch.Tensor:
+
+        assert values.dim() == 2
+        assert 1 <= mean.dim() <= 2
+        assert len(inv_covariance.shape) == 2
+        assert values.shape[1] == mean.shape[-1]
+        assert mean.shape[-1] == inv_covariance.shape[0]
+        assert inv_covariance.shape[0] == inv_covariance.shape[1]
+
+        if mean.dim() == 1:  # Distribution mean.
+            mean = mean.unsqueeze(0)
+        x_mu = values - mean  # batch x features
+        # Same as dist = x_mu.t() * inv_covariance * x_mu batch wise
+        inv_covariance = inv_covariance.cuda()
+        dist = torch.einsum('im,mn,in->i', x_mu, inv_covariance, x_mu)
+
+        return dist.sqrt()
diff --git a/OpenOOD/openood/postprocessors/dice_postprocessor.py b/OpenOOD/openood/postprocessors/dice_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef340d086342e492035b9342467c0012286569e0
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/dice_postprocessor.py
@@ -0,0 +1,66 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+
+normalizer = lambda x: x / (np.linalg.norm(x, axis=-1, keepdims=True) + 1e-10)
+
+
+class DICEPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(DICEPostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.p = self.args.p
+        self.mean_act = None
+        self.masked_w = None
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            activation_log = []
+            net.eval()
+            with torch.no_grad():
+                for batch in tqdm(id_loader_dict['train'],
+                                  desc='Setup: ',
+                                  position=0,
+                                  leave=True):
+                    data = batch['data'].cuda()
+                    data = data.float()
+
+                    _, feature = net(data, return_feature=True)
+                    activation_log.append(feature.data.cpu().numpy())
+
+            activation_log = np.concatenate(activation_log, axis=0)
+            self.mean_act = activation_log.mean(0)
+            self.setup_flag = True
+        else:
+            pass
+
+    def calculate_mask(self, w):
+        contrib = self.mean_act[None, :] * w.data.squeeze().cpu().numpy()
+        self.thresh = np.percentile(contrib, self.p)
+        mask = torch.Tensor((contrib > self.thresh)).cuda()
+        self.masked_w = w * mask
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        fc_weight, fc_bias = net.get_fc()
+        if self.masked_w is None:
+            self.calculate_mask(torch.from_numpy(fc_weight).cuda())
+        _, feature = net(data, return_feature=True)
+        vote = feature[:, None, :] * self.masked_w
+        output = vote.sum(2) + torch.from_numpy(fc_bias).cuda()
+        _, pred = torch.max(torch.softmax(output, dim=1), dim=1)
+        energyconf = torch.logsumexp(output.data.cpu(), dim=1)
+        return pred, energyconf
+
+    def set_hyperparam(self, hyperparam: list):
+        self.p = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.p
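DICE's `calculate_mask` keeps only the strongest expected weight-activation contributions, `contrib = mean_act * w`, above the p-th percentile. A small numpy illustration with made-up numbers:

    import numpy as np

    mean_act = np.array([0.2, 1.5, 0.1, 0.9])   # per-unit mean activation
    w = np.array([[1.0, -0.5, 2.0, 0.3]])       # one row of fc weights
    contrib = mean_act[None, :] * w             # expected contribution
    thresh = np.percentile(contrib, 70)         # p = 70
    masked_w = w * (contrib > thresh)           # keep the top 30% only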
diff --git a/OpenOOD/openood/postprocessors/draem_postprocessor.py b/OpenOOD/openood/postprocessors/draem_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..03830d9879db73de9af13261eb3e11e16ac3f124
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/draem_postprocessor.py
@@ -0,0 +1,31 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from .base_postprocessor import BasePostprocessor
+
+
+class DRAEMPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(DRAEMPostprocessor, self).__init__(config)
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        # forward
+        gray_rec = net['generative'](data)
+        joined_in = torch.cat((gray_rec.detach(), data), dim=1)
+
+        out_mask = net['discriminative'](joined_in)
+        out_mask_sm = torch.softmax(out_mask, dim=1)
+
+        # calculate image level scores
+        out_mask_averaged = torch.nn.functional.avg_pool2d(
+            out_mask_sm[:, 1:, :, :], 21, stride=1,
+            padding=21 // 2).cpu().detach().numpy()
+
+        image_score = np.max(out_mask_averaged, axis=(1, 2, 3))
+
+        return -1 * torch.ones(data.shape[0]), torch.from_numpy(
+            -image_score)
diff --git a/OpenOOD/openood/postprocessors/dropout_postprocessor.py b/OpenOOD/openood/postprocessors/dropout_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7ec4fa92e6fb7590f3e00805652e7dbe7ca992c
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/dropout_postprocessor.py
@@ -0,0 +1,24 @@
+from typing import Any
+
+import torch
+from torch import nn
+
+from .base_postprocessor import BasePostprocessor
+
+
+class DropoutPostProcessor(BasePostprocessor):
+    def __init__(self, config):
+        self.config = config
+        self.args = config.postprocessor.postprocessor_args
+        self.dropout_times = self.args.dropout_times
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        logits_list = [net.forward(data) for i in range(self.dropout_times)]
+        logits_mean = torch.zeros_like(logits_list[0], dtype=torch.float32)
+        for i in range(self.dropout_times):
+            logits_mean += logits_list[i]
+        logits_mean /= self.dropout_times
+        score = torch.softmax(logits_mean, dim=1)
+        conf, pred = torch.max(score, dim=1)
+        return pred, conf
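The dropout postprocessor averages logits over `dropout_times` stochastic forward passes, which only helps if dropout stays active at evaluation time (the pipeline is assumed to arrange that; it is not visible in this file). An equivalent minimal sketch:

    import torch

    def mc_dropout_score(net, data, times=10):
        # dropout must be active inside `net` for the passes to differ
        logits = torch.stack([net(data) for _ in range(times)])
        probs = torch.softmax(logits.mean(dim=0), dim=1)
        conf, pred = probs.max(dim=1)
        return pred, conf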
diff --git a/OpenOOD/openood/postprocessors/dsvdd_postprocessor.py b/OpenOOD/openood/postprocessors/dsvdd_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c01e990c411c943cbe24e6c8fef4ee1142e857d
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/dsvdd_postprocessor.py
@@ -0,0 +1,36 @@
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+from openood.trainers.dsvdd_trainer import init_center_c
+
+from .base_postprocessor import BasePostprocessor
+
+
+class DSVDDPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(DSVDDPostprocessor, self).__init__(config)
+        self.hyperpara = {}
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if self.config.c == 'None' and self.config.network.name != 'dcae':
+            self.c = init_center_c(id_loader_dict['train'], net)
+        else:
+            self.c = self.config.c
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        outputs = net(data)
+        if self.config.network.name != 'dcae':
+            conf = torch.sum((outputs - self.c)**2,
+                             dim=tuple(range(1, outputs.dim())))
+
+        # this is for pre-training the dcae network from the original paper
+        elif self.config.network.name == 'dcae':
+            conf = torch.sum((outputs - data)**2,
+                             dim=tuple(range(1, outputs.dim())))
+        else:
+            raise NotImplementedError
+
+        return -1 * torch.ones(data.shape[0]), conf
diff --git a/OpenOOD/openood/postprocessors/ebo_postprocessor.py b/OpenOOD/openood/postprocessors/ebo_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc0ef25f59ad7f8f1712ebffff9630f2c8422867
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/ebo_postprocessor.py
@@ -0,0 +1,30 @@
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+from .base_postprocessor import BasePostprocessor
+
+
+class EBOPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super().__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.temperature = self.args.temperature
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output = net(data)
+        score = torch.softmax(output, dim=1)
+        _, pred = torch.max(score, dim=1)
+        conf = self.temperature * torch.logsumexp(output / self.temperature,
+                                                  dim=1)
+        return pred, conf
+
+    def set_hyperparam(self, hyperparam: list):
+        self.temperature = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.temperature
+
\ No newline at end of file
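EBO's confidence is the temperature-scaled free energy, conf = T * logsumexp(f(x) / T), which at T = 1 reduces to the logsumexp of the logits. A quick numeric check:

    import torch

    logits = torch.tensor([[2.0, 0.5, -1.0]])
    T = 1.0
    energy = T * torch.logsumexp(logits / T, dim=1)  # ~2.24 for this batch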
diff --git a/OpenOOD/openood/postprocessors/ensemble_postprocessor.py b/OpenOOD/openood/postprocessors/ensemble_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a8786f73f9153e2b3f72a59181f760275fc4779
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/ensemble_postprocessor.py
@@ -0,0 +1,52 @@
+import os.path as osp
+from copy import deepcopy
+from typing import Any
+
+import torch
+from torch import nn
+
+from .base_postprocessor import BasePostprocessor
+
+
+class EnsemblePostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(EnsemblePostprocessor, self).__init__(config)
+        self.config = config
+        self.postprocess_config = config.postprocessor
+        self.postprocessor_args = self.postprocess_config.postprocessor_args
+        assert self.postprocessor_args.network_name == \
+            self.config.network.name,\
+            'checkpoint network type and model type do not align!'
+        # get ensemble args
+        self.checkpoint_root = self.postprocessor_args.checkpoint_root
+
+        # list of trained network checkpoints
+        self.checkpoints = self.postprocessor_args.checkpoints
+        # number of networks to ensemble
+        self.num_networks = self.postprocessor_args.num_networks
+        # get networks
+        self.checkpoint_dirs = [
+            osp.join(self.checkpoint_root, path, 'best.ckpt')
+            for path in self.checkpoints
+        ]
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        self.networks = [deepcopy(net) for i in range(self.num_networks)]
+        for i in range(self.num_networks):
+            self.networks[i].load_state_dict(torch.load(
+                self.checkpoint_dirs[i]),
+                                             strict=False)
+            self.networks[i].eval()
+
+    def postprocess(self, net: nn.Module, data: Any):
+        logits_list = [
+            self.networks[i](data) for i in range(self.num_networks)
+        ]
+        logits_mean = torch.zeros_like(logits_list[0], dtype=torch.float32)
+        for i in range(self.num_networks):
+            logits_mean += logits_list[i]
+        logits_mean /= self.num_networks
+
+        score = torch.softmax(logits_mean, dim=1)
+        conf, pred = torch.max(score, dim=1)
+        return pred, conf
diff --git a/OpenOOD/openood/postprocessors/gen_postprocessor.py b/OpenOOD/openood/postprocessors/gen_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a1d010575e2aadec52cb96012d94037c512553a
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/gen_postprocessor.py
@@ -0,0 +1,38 @@
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+from .base_postprocessor import BasePostprocessor
+
+
+class GENPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super().__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.gamma = self.args.gamma
+        self.M = self.args.M
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output = net(data)
+        score = torch.softmax(output, dim=1)
+        _, pred = torch.max(score, dim=1)
+        conf = self.generalized_entropy(score, self.gamma, self.M)
+        return pred, conf
+
+    def set_hyperparam(self, hyperparam: list):
+        self.gamma = hyperparam[0]
+        self.M = hyperparam[1]
+
+    def get_hyperparam(self):
+        return [self.gamma, self.M]
+
+    def generalized_entropy(self, softmax_id_val, gamma=0.1, M=100):
+        probs = softmax_id_val
+        probs_sorted = torch.sort(probs, dim=1)[0][:, -M:]
+        scores = torch.sum(probs_sorted**gamma * (1 - probs_sorted)**gamma,
+                           dim=1)
+
+        return -scores
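GEN's confidence is the negative generalized entropy over the top-M softmax probabilities, -sum(p_i**gamma * (1 - p_i)**gamma); a peakier distribution has lower entropy and therefore a higher confidence. A check with gamma = 0.1 and M spanning the whole vector:

    import torch

    p = torch.tensor([[0.7, 0.2, 0.1]])
    gamma, M = 0.1, 3
    top = torch.sort(p, dim=1)[0][:, -M:]
    conf = -torch.sum(top**gamma * (1 - top)**gamma, dim=1)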
diff --git a/OpenOOD/openood/postprocessors/gmm_postprocessor.py b/OpenOOD/openood/postprocessors/gmm_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..d968dfdd14294cdc64d8462789dacb85a9655a77
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/gmm_postprocessor.py
@@ -0,0 +1,198 @@
+from __future__ import print_function
+
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+from sklearn.mixture import GaussianMixture
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+from .mds_ensemble_postprocessor import (process_feature_type,
+                                         reduce_feature_dim, tensor2list)
+
+
+class GMMPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        self.config = config
+        self.postprocessor_args = config.postprocessor.postprocessor_args
+        self.feature_type_list = self.postprocessor_args.feature_type_list
+        self.reduce_dim_list = self.postprocessor_args.reduce_dim_list
+        self.num_clusters_list = self.postprocessor_args.num_clusters_list
+        self.alpha_list = self.postprocessor_args.alpha_list
+
+        self.num_layer = len(self.feature_type_list)
+        self.feature_mean, self.feature_prec = None, None
+        self.component_weight_list, self.transform_matrix_list = None, None
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        self.feature_mean, self.feature_prec, self.component_weight_list, \
+            self.transform_matrix_list = get_GMM_stat(net,
+                                                      id_loader_dict['train'],
+                                                      self.num_clusters_list,
+                                                      self.feature_type_list,
+                                                      self.reduce_dim_list)
+
+    def postprocess(self, net: nn.Module, data: Any):
+        for layer_index in range(self.num_layer):
+            pred, score = compute_GMM_score(net,
+                                            data,
+                                            self.feature_mean,
+                                            self.feature_prec,
+                                            self.component_weight_list,
+                                            self.transform_matrix_list,
+                                            layer_index,
+                                            self.feature_type_list,
+                                            return_pred=True)
+            if layer_index == 0:
+                score_list = score.view([-1, 1])
+            else:
+                score_list = torch.cat((score_list, score.view([-1, 1])), 1)
+        alpha = torch.cuda.FloatTensor(self.alpha_list)
+        conf = torch.matmul(torch.log(score_list + 1e-45), alpha)
+        return pred, conf
+
+
+@torch.no_grad()
+def get_GMM_stat(model, train_loader, num_clusters_list, feature_type_list,
+                 reduce_dim_list):
+    """Compute GMM statistics.
+    Args:
+        model (nn.Module): pretrained model to extract features
+        train_loader (DataLoader): use all training data to perform GMM
+        num_clusters_list (list): number of clusters for each layer
+        feature_type_list (list): feature type for each layer
+        reduce_dim_list (list): dim-reduce method for each layer
+
+    return: feature_mean: list of class mean
+            feature_prec: list of precisions
+            component_weight_list: list of component
+            transform_matrix_list: list of transform_matrix
+    """
+    feature_mean_list, feature_prec_list = [], []
+    component_weight_list, transform_matrix_list = [], []
+    num_layer = len(num_clusters_list)
+    feature_all = [None for x in range(num_layer)]
+    label_list = []
+    # collect features
+    for batch in tqdm(train_loader, desc='Compute GMM Stats [Collecting]'):
+        data = batch['data_aux'].cuda()
+        label = batch['label']
+        _, feature_list = model(data, return_feature_list=True)
+        label_list.extend(tensor2list(label))
+        for layer_idx in range(num_layer):
+            feature_type = feature_type_list[layer_idx]
+            feature_processed = process_feature_type(feature_list[layer_idx],
+                                                     feature_type)
+            if isinstance(feature_all[layer_idx], type(None)):
+                feature_all[layer_idx] = tensor2list(feature_processed)
+            else:
+                feature_all[layer_idx].extend(tensor2list(feature_processed))
+    label_list = np.array(label_list)
+    # reduce feature dim and perform gmm estimation
+    for layer_idx in tqdm(range(num_layer),
+                          desc='Compute GMM Stats [Estimating]'):
+        feature_sub = np.array(feature_all[layer_idx])
+        transform_matrix = reduce_feature_dim(feature_sub, label_list,
+                                              reduce_dim_list[layer_idx])
+        feature_sub = np.dot(feature_sub, transform_matrix)
+        # GMM estimation
+        gm = GaussianMixture(
+            n_components=num_clusters_list[layer_idx],
+            random_state=0,
+            covariance_type='tied',
+        ).fit(feature_sub)
+        feature_mean = gm.means_
+        feature_prec = gm.precisions_
+        component_weight = gm.weights_
+
+        feature_mean_list.append(torch.Tensor(feature_mean).cuda())
+        feature_prec_list.append(torch.Tensor(feature_prec).cuda())
+        component_weight_list.append(torch.Tensor(component_weight).cuda())
+        transform_matrix_list.append(torch.Tensor(transform_matrix).cuda())
+
+    return feature_mean_list, feature_prec_list, \
+        component_weight_list, transform_matrix_list
+
+
+def compute_GMM_score(model,
+                      data,
+                      feature_mean,
+                      feature_prec,
+                      component_weight,
+                      transform_matrix,
+                      layer_idx,
+                      feature_type_list,
+                      return_pred=False):
+    """Compute GMM score.
+    Args:
+        model (nn.Module): pretrained model to extract features
+        data (DataLoader): input one training batch
+        feature_mean (list): a list of torch.cuda.Tensor()
+        feature_prec (list): a list of torch.cuda.Tensor()
+        component_weight (list): a list of torch.cuda.Tensor()
+        transform_matrix (list): a list of torch.cuda.Tensor()
+        layer_idx (int): index of layer in interest
+        feature_type_list (list): a list of strings to indicate feature type
+        return_pred (bool): return prediction and confidence, or only conf.
+
+    return:
+        pred (torch.cuda.Tensor):
+        prob (torch.cuda.Tensor):
+    """
+    # extract features
+    pred_list, feature_list = model(data, return_feature_list=True)
+    pred = torch.argmax(pred_list, dim=1)
+    feature_list = process_feature_type(feature_list[layer_idx],
+                                        feature_type_list[layer_idx])
+    feature_list = torch.mm(feature_list, transform_matrix[layer_idx])
+    # compute prob
+    for cluster_idx in range(len(feature_mean[layer_idx])):
+        zero_f = feature_list - feature_mean[layer_idx][cluster_idx]
+        term_gau = -0.5 * torch.mm(torch.mm(zero_f, feature_prec[layer_idx]),
+                                   zero_f.t()).diag()
+        prob_gau = torch.exp(term_gau)
+        if cluster_idx == 0:
+            prob_matrix = prob_gau.view([-1, 1])
+        else:
+            prob_matrix = torch.cat((prob_matrix, prob_gau.view(-1, 1)), 1)
+
+    prob = torch.mm(prob_matrix, component_weight[layer_idx].view(-1, 1))
+
+    if return_pred:
+        return pred, prob
+    else:
+        return prob
+
+
+def compute_single_GMM_score(model,
+                             data,
+                             feature_mean,
+                             feature_prec,
+                             component_weight,
+                             transform_matrix,
+                             layer_idx,
+                             feature_type_list,
+                             return_pred=False):
+    # extract features
+    pred_list, feature_list = model(data, return_feature_list=True)
+    pred = torch.argmax(pred_list, dim=1)
+    feature_list = process_feature_type(feature_list[layer_idx],
+                                        feature_type_list)
+    feature_list = torch.mm(feature_list, transform_matrix)
+    # compute prob
+    for cluster_idx in range(len(feature_mean)):
+        zero_f = feature_list - feature_mean[cluster_idx]
+        term_gau = -0.5 * torch.mm(torch.mm(zero_f, feature_prec),
+                                   zero_f.t()).diag()
+        prob_gau = torch.exp(term_gau)
+        if cluster_idx == 0:
+            prob_matrix = prob_gau.view([-1, 1])
+        else:
+            prob_matrix = torch.cat((prob_matrix, prob_gau.view(-1, 1)), 1)
+    prob = torch.mm(prob_matrix, component_weight.view(-1, 1))
+    if return_pred:
+        return pred, prob
+    else:
+        return prob
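Per layer, the GMM score is the tied-covariance mixture likelihood sum_k w_k * exp(-0.5 * (x - mu_k)^T P (x - mu_k)), and the per-layer log-scores are combined linearly with the alpha weights. A numpy sketch for one layer with a hypothetical two-component mixture:

    import numpy as np

    x = np.array([0.5, -0.2])
    means = np.array([[0.0, 0.0], [1.0, 1.0]])
    prec = np.eye(2)                      # tied precision matrix
    weights = np.array([0.6, 0.4])
    d = x - means                         # (components, dims)
    mahal = -0.5 * np.einsum('kd,de,ke->k', d, prec, d)
    prob = np.exp(mahal) @ weights        # mixture likelihood for x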
diff --git a/OpenOOD/openood/postprocessors/godin_postprocessor.py b/OpenOOD/openood/postprocessors/godin_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..3db4e7c8091f88dfbe5ee044d061d2886631e12f
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/godin_postprocessor.py
@@ -0,0 +1,55 @@
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+from .base_postprocessor import BasePostprocessor
+from openood.preprocessors.transform import normalization_dict
+
+
+class GodinPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(GodinPostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+
+        self.score_func = self.args.score_func
+        self.noise_magnitude = self.args.noise_magnitude
+        try:
+            self.input_std = normalization_dict[self.config.dataset.name][1]
+        except KeyError:
+            self.input_std = [0.5, 0.5, 0.5]
+
+    def postprocess(self, net: nn.Module, data: Any):
+        data.requires_grad = True
+        output = net(data, inference=True)
+
+        # Calculating the perturbation we need to add, that is,
+        # the sign of the gradient of the max score w.r.t. the input
+        max_scores, _ = torch.max(output, dim=1)
+        max_scores.backward(torch.ones(len(max_scores)).cuda())
+
+        # Normalizing the gradient to binary in {0, 1}
+        gradient = torch.ge(data.grad.detach(), 0)
+        gradient = (gradient.float() - 0.5) * 2
+
+        # Scaling values taken from original code
+        gradient[:, 0] = (gradient[:, 0]) / self.input_std[0]
+        gradient[:, 1] = (gradient[:, 1]) / self.input_std[1]
+        gradient[:, 2] = (gradient[:, 2]) / self.input_std[2]
+
+        # Adding small perturbations to images
+        tempInputs = torch.add(data.detach(),
+                               gradient,
+                               alpha=self.noise_magnitude)
+
+        # calculate score
+        output = net(tempInputs, inference=True, score_func=self.score_func)
+
+        # Calculating the confidence after adding perturbations
+        nnOutput = output.detach()
+        nnOutput = nnOutput - nnOutput.max(dim=1, keepdims=True).values
+        nnOutput = nnOutput.exp() / nnOutput.exp().sum(dim=1, keepdims=True)
+
+        conf, pred = nnOutput.max(dim=1)
+
+        return pred, conf
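The perturbation step above is the ODIN-style trick: nudge the input along the sign of the gradient of the max score, rescaled by the per-channel normalization std, then re-score. A generic, self-contained sketch (eps and std are placeholders, not the repository's config values):

    import torch

    def perturb(x, net, eps=0.0014, std=(0.5, 0.5, 0.5)):
        x = x.clone().requires_grad_(True)
        s, _ = net(x).max(dim=1)
        s.sum().backward()
        g = x.grad.sign()
        for c, sd in enumerate(std):
            g[:, c] /= sd
        return (x.detach() + eps * g).detach()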
diff --git a/OpenOOD/openood/postprocessors/gradnorm_postprocessor.py b/OpenOOD/openood/postprocessors/gradnorm_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f17ec079470b32091820b4d74aec979aef6e4cc
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/gradnorm_postprocessor.py
@@ -0,0 +1,49 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .base_postprocessor import BasePostprocessor
+from .info import num_classes_dict
+
+
+class GradNormPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super().__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.num_classes = num_classes_dict[self.config.dataset.name]
+
+    def gradnorm(self, x, w, b):
+        fc = torch.nn.Linear(*w.shape[::-1])
+        fc.weight.data[...] = torch.from_numpy(w)
+        fc.bias.data[...] = torch.from_numpy(b)
+        fc.cuda()
+
+        targets = torch.ones((1, self.num_classes)).cuda()
+
+        confs = []
+        for i in x:
+            fc.zero_grad()
+            loss = torch.mean(
+                torch.sum(-targets * F.log_softmax(fc(i[None]), dim=-1),
+                          dim=-1))
+            loss.backward()
+            layer_grad_norm = torch.sum(torch.abs(
+                fc.weight.grad.data)).cpu().numpy()
+            confs.append(layer_grad_norm)
+
+        return np.array(confs)
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        w, b = net.get_fc()
+        logits, features = net.forward(data, return_feature=True)
+        with torch.enable_grad():
+            scores = self.gradnorm(features, w, b)
+        _, preds = torch.max(logits, dim=1)
+        return preds, torch.from_numpy(scores)
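GradNorm scores each sample by the L1 norm of the gradient of the cross-entropy between a uniform target and the softmax, taken w.r.t. the rebuilt classifier weights; the all-ones target in the file differs from a normalized uniform distribution only by a constant factor on the score. A one-sample sketch:

    import torch
    import torch.nn.functional as F

    fc = torch.nn.Linear(8, 3)
    feat = torch.randn(8)
    targets = torch.ones(1, 3)
    loss = torch.sum(-targets * F.log_softmax(fc(feat[None]), dim=-1))
    loss.backward()
    score = fc.weight.grad.abs().sum()  # higher = more in-distribution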
diff --git a/OpenOOD/openood/postprocessors/gram_postprocessor.py b/OpenOOD/openood/postprocessors/gram_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..669bd9771a124f2cc4fd291f5f30b535463669f6
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/gram_postprocessor.py
@@ -0,0 +1,176 @@
+from __future__ import division, print_function
+
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+from .info import num_classes_dict
+
+
+class GRAMPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        self.config = config
+        self.postprocessor_args = config.postprocessor.postprocessor_args
+        self.num_classes = num_classes_dict[self.config.dataset.name]
+        self.powers = self.postprocessor_args.powers
+
+        self.feature_min, self.feature_max = None, None
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            self.feature_min, self.feature_max = sample_estimator(
+                net, id_loader_dict['train'], self.num_classes, self.powers)
+            self.setup_flag = True
+        else:
+            pass
+
+    def postprocess(self, net: nn.Module, data: Any):
+        preds, deviations = get_deviations(net, data, self.feature_min,
+                                           self.feature_max, self.num_classes,
+                                           self.powers)
+        return preds, deviations
+
+    def set_hyperparam(self, hyperparam: list):
+        self.powers = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.powers
+
+
+def tensor2list(x):
+    return x.data.cpu().tolist()
+
+
+@torch.no_grad()
+def sample_estimator(model, train_loader, num_classes, powers):
+
+    model.eval()
+
+    num_layer = 5  # 4 for lenet
+    num_poles_list = powers
+    num_poles = len(num_poles_list)
+    feature_class = [[[None for x in range(num_poles)]
+                      for y in range(num_layer)] for z in range(num_classes)]
+    label_list = []
+    mins = [[[None for x in range(num_poles)] for y in range(num_layer)]
+            for z in range(num_classes)]
+    maxs = [[[None for x in range(num_poles)] for y in range(num_layer)]
+            for z in range(num_classes)]
+
+    # collect features and compute gram matrix
+    for batch in tqdm(train_loader, desc='Compute min/max'):
+        data = batch['data'].cuda()
+        label = batch['label'].cuda()
+
+        leaf_labels = torch.nonzero(label < 999, as_tuple=False)
+        data = torch.index_select(data, 0, leaf_labels.squeeze())
+        label = torch.index_select(label, 0, leaf_labels.squeeze()).cpu()
+
+
+        _, feature_list = model(data, return_feature_list=True)
+        label_list = tensor2list(label)
+
+
+
+        for layer_idx in range(num_layer):
+
+            for pole_idx, p in enumerate(num_poles_list):
+                temp = feature_list[layer_idx].detach()
+
+                temp = temp**p
+                temp = temp.reshape(temp.shape[0], temp.shape[1], -1)
+                temp = ((torch.matmul(temp,
+                                      temp.transpose(dim0=2,
+                                                     dim1=1)))).sum(dim=2)
+                temp = (temp.sign() * torch.abs(temp)**(1 / p)).reshape(
+                    temp.shape[0], -1)
+
+                temp = tensor2list(temp)
+                for feature, label in zip(temp, label_list):
+                    if isinstance(feature_class[label][layer_idx][pole_idx],
+                                  type(None)):
+                        feature_class[label][layer_idx][pole_idx] = feature
+                    else:
+                        feature_class[label][layer_idx][pole_idx].extend(
+                            feature)
+    # compute mins/maxs
+    for label in range(num_classes):
+        for layer_idx in range(num_layer):
+            for poles_idx in range(num_poles):
+                feature = torch.tensor(
+                    np.array(feature_class[label][layer_idx][poles_idx]))
+                current_min = feature.min(dim=0, keepdim=True)[0]
+                current_max = feature.max(dim=0, keepdim=True)[0]
+
+                if mins[label][layer_idx][poles_idx] is None:
+                    mins[label][layer_idx][poles_idx] = current_min
+                    maxs[label][layer_idx][poles_idx] = current_max
+                else:
+                    mins[label][layer_idx][poles_idx] = torch.min(
+                        current_min, mins[label][layer_idx][poles_idx])
+                    maxs[label][layer_idx][poles_idx] = torch.max(
+                        current_max, maxs[label][layer_idx][poles_idx])
+
+    return mins, maxs
+
+
+def get_deviations(model, data, mins, maxs, num_classes, powers):
+    model.eval()
+
+    num_layer = 5  # 4 for lenet
+    num_poles_list = powers
+    exist = 1
+    pred_list = []
+    dev = [0 for x in range(data.shape[0])]
+
+    # get predictions
+    logits, feature_list = model(data, return_feature_list=True)
+    confs = F.softmax(logits, dim=1).cpu().detach().numpy()
+    preds = np.argmax(confs, axis=1)
+    predsList = preds.tolist()
+    preds = torch.tensor(preds)
+
+    for pred in predsList:
+        exist = 1
+        if len(pred_list) == 0:
+            pred_list.extend([pred])
+        else:
+            for pred_now in pred_list:
+                if pred_now == pred:
+                    exist = 0
+            if exist == 1:
+                pred_list.extend([pred])
+
+    # compute sample level deviation
+    for layer_idx in range(num_layer):
+        for pole_idx, p in enumerate(num_poles_list):
+            # get gram matrix
+            temp = feature_list[layer_idx].detach()
+            temp = temp**p
+            temp = temp.reshape(temp.shape[0], temp.shape[1], -1)
+            temp = ((torch.matmul(temp, temp.transpose(dim0=2,
+                                                       dim1=1)))).sum(dim=2)
+            temp = (temp.sign() * torch.abs(temp)**(1 / p)).reshape(
+                temp.shape[0], -1)
+            temp = tensor2list(temp)
+
+            # compute the deviations with train data
+            for idx in range(len(temp)):
+                dev[idx] += (F.relu(mins[preds[idx]][layer_idx][pole_idx] -
+                                    sum(temp[idx])) /
+                             torch.abs(mins[preds[idx]][layer_idx][pole_idx] +
+                                       10**-6)).sum()
+                dev[idx] += (F.relu(
+                    sum(temp[idx]) - maxs[preds[idx]][layer_idx][pole_idx]) /
+                             torch.abs(maxs[preds[idx]][layer_idx][pole_idx] +
+                                       10**-6)).sum()
+    conf = [i / 50 for i in dev]
+
+    return preds, torch.tensor(conf)
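Gram's per-sample deviation rewards staying inside the per-class [min, max] bands recorded for each layer and power; values below the minimum or above the maximum are penalized relative to the bound's magnitude. The accumulation step reduces to the following sketch:

    import torch
    import torch.nn.functional as F

    def band_deviation(val, mn, mx, eps=1e-6):
        below = F.relu(mn - val) / torch.abs(mn + eps)
        above = F.relu(val - mx) / torch.abs(mx + eps)
        return (below + above).sum()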
diff --git a/OpenOOD/openood/postprocessors/info.py b/OpenOOD/openood/postprocessors/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dd5a256df9d98e7a38fc569e32a7f267a2865a7
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/info.py
@@ -0,0 +1,7 @@
+num_classes_dict = {
+    'cifar10': 10,
+    'cifar100': 100,
+    'imagenet200': 200,
+    'imagenet': 1000,
+    'bronze2': 11,
+}
diff --git a/OpenOOD/openood/postprocessors/kl_matching_postprocessor.py b/OpenOOD/openood/postprocessors/kl_matching_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..28efab95154543566fd61b32b0f2ba23f2e75f4d
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/kl_matching_postprocessor.py
@@ -0,0 +1,67 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from sklearn.metrics import pairwise_distances_argmin_min
+import scipy.stats
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+from .info import num_classes_dict
+
+
+class KLMatchingPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_classes = num_classes_dict[self.config.dataset.name]
+        self.setup_flag = False
+
+    def kl(self, p, q):
+        return scipy.stats.entropy(p, q)
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            net.eval()
+
+            print('Extracting id validation softmax posterior distributions')
+            all_softmax = []
+            preds = []
+            with torch.no_grad():
+                for batch in tqdm(id_loader_dict['val'],
+                                  desc='Setup: ',
+                                  position=0,
+                                  leave=True):
+                    data = batch['data'].cuda()
+                    logits = net(data)
+                    all_softmax.append(F.softmax(logits, 1).cpu())
+                    preds.append(logits.argmax(1).cpu())
+
+            all_softmax = torch.cat(all_softmax)
+            preds = torch.cat(preds)
+
+            self.mean_softmax_val = []
+            for i in tqdm(range(self.num_classes)):
+                # if there are no validation samples
+                # for this category
+                if torch.sum(preds.eq(i).float()) == 0:
+                    temp = np.zeros((self.num_classes, ))
+                    temp[i] = 1
+                    self.mean_softmax_val.append(temp)
+                else:
+                    self.mean_softmax_val.append(
+                        all_softmax[preds.eq(i)].mean(0).numpy())
+
+            self.setup_flag = True
+        else:
+            pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        logits = net(data)
+        preds = logits.argmax(1)
+        softmax = F.softmax(logits, 1).cpu().numpy()
+        scores = -pairwise_distances_argmin_min(
+            softmax, np.array(self.mean_softmax_val), metric=self.kl)[1]
+        return preds, torch.from_numpy(scores)
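KL Matching stores one mean softmax template per class and scores a new sample by the negative KL divergence to the closest template. A small scipy check (templates are made up):

    import numpy as np
    import scipy.stats

    templates = np.array([[0.8, 0.1, 0.1], [0.1, 0.8, 0.1]])
    p = np.array([0.6, 0.3, 0.1])
    score = -min(scipy.stats.entropy(p, t) for t in templates)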
diff --git a/OpenOOD/openood/postprocessors/knn_postprocessor.py b/OpenOOD/openood/postprocessors/knn_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..766949bd689b1c647af775e54bb68c957d0ac2ba
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/knn_postprocessor.py
@@ -0,0 +1,62 @@
+from typing import Any
+
+import faiss
+import numpy as np
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+
+normalizer = lambda x: x / (np.linalg.norm(x, axis=-1, keepdims=True) + 1e-10)
+
+
+class KNNPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(KNNPostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.K = self.args.K
+        self.activation_log = None
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            activation_log = []
+            net.eval()
+            with torch.no_grad():
+                for batch in tqdm(id_loader_dict['train'],
+                                  desc='Setup: ',
+                                  position=0,
+                                  leave=True):
+                    data = batch['data'].cuda()
+                    data = data.float()
+
+                    _, feature = net(data, return_feature=True)
+                    activation_log.append(
+                        normalizer(feature.data.cpu().numpy()))
+
+            self.activation_log = np.concatenate(activation_log, axis=0)
+            self.index = faiss.IndexFlatL2(feature.shape[1])
+            self.index.add(self.activation_log)
+            self.setup_flag = True
+        else:
+            pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output, feature = net(data, return_feature=True)
+        feature_normed = normalizer(feature.data.cpu().numpy())
+        D, _ = self.index.search(
+            feature_normed,
+            self.K,
+        )
+        kth_dist = -D[:, -1]
+        _, pred = torch.max(torch.softmax(output, dim=1), dim=1)
+        return pred, torch.from_numpy(kth_dist)
+
+    def set_hyperparam(self, hyperparam: list):
+        self.K = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.K
diff --git a/OpenOOD/openood/postprocessors/maxlogit_postprocessor.py b/OpenOOD/openood/postprocessors/maxlogit_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..3944bd36b71e4fadddbd96700d01503693568e01
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/maxlogit_postprocessor.py
@@ -0,0 +1,20 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+
+
+class MaxLogitPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super().__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output = net(data)
+        conf, pred = torch.max(output, dim=1)
+        return pred, conf
diff --git a/OpenOOD/openood/postprocessors/mcd_postprocessor.py b/OpenOOD/openood/postprocessors/mcd_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..56be5003bae4f0754bbdf59bab8ff50c0cd6425d
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/mcd_postprocessor.py
@@ -0,0 +1,17 @@
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+from .base_postprocessor import BasePostprocessor
+
+
+class MCDPostprocessor(BasePostprocessor):
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        logits1, logits2 = net(data, return_double=True)
+        score1 = torch.softmax(logits1, dim=1)
+        score2 = torch.softmax(logits2, dim=1)
+        conf = -torch.sum(torch.abs(score1 - score2), dim=1)
+        _, pred = torch.max(score1, dim=1)
+        return pred, conf
diff --git a/OpenOOD/openood/postprocessors/mcm_postprocessor.py b/OpenOOD/openood/postprocessors/mcm_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b48a0b6404bcde1cbbfd7c249d809051fb2e7c1e
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/mcm_postprocessor.py
@@ -0,0 +1,28 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .base_postprocessor import BasePostprocessor
+
+
+class MCMPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(MCMPostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.tau = self.args.tau
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output = net(data)
+        score = torch.softmax(output / self.tau, dim=1)
+        conf, pred = torch.max(score, dim=1)
+        return pred, conf
+
+    def set_hyperparam(self, hyperparam: list):
+        self.tau = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.tau
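Of the four files above, MCD is the one whose score is not a function of a single softmax: it uses the disagreement between two classifier heads, conf = -sum_c |p1(c) - p2(c)|, so near-zero disagreement signals in-distribution data. A toy check:

    import torch

    p1 = torch.softmax(torch.tensor([[2.0, 0.5, -1.0]]), dim=1)
    p2 = torch.softmax(torch.tensor([[1.5, 0.9, -0.5]]), dim=1)
    conf = -torch.sum(torch.abs(p1 - p2), dim=1)  # close to 0 = heads agree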
diff --git a/OpenOOD/openood/postprocessors/mds_ensemble_postprocessor.py b/OpenOOD/openood/postprocessors/mds_ensemble_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..15d13ea2007b83f5439dc7a4181ca4003435cb18
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/mds_ensemble_postprocessor.py
@@ -0,0 +1,521 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+from scipy import linalg
+from sklearn.covariance import (empirical_covariance, ledoit_wolf,
+                                shrunk_covariance)
+from sklearn.decomposition import PCA
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.linear_model import LogisticRegressionCV
+from sklearn.preprocessing import StandardScaler
+from torch.autograd import Variable
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+from .info import num_classes_dict
+
+
+class MDSEnsemblePostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        self.config = config
+        self.postprocessor_args = config.postprocessor.postprocessor_args
+        self.magnitude = self.postprocessor_args.noise
+        self.feature_type_list = self.postprocessor_args.feature_type_list
+        self.reduce_dim_list = self.postprocessor_args.reduce_dim_list
+
+        self.num_classes = num_classes_dict[self.config.dataset.name]
+        self.num_layer = len(self.feature_type_list)
+
+        self.feature_mean, self.feature_prec = None, None
+        self.alpha_list = None
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            # step 1: estimate initial mean and variance from training set
+            self.feature_mean, self.feature_prec, self.transform_matrix = \
+                get_MDS_stat(net, id_loader_dict['train'], self.num_classes,
+                             self.feature_type_list, self.reduce_dim_list)
+
+            # step 2: input process and hyperparam searching for alpha
+            if self.postprocessor_args.alpha_list:
+                print('\n Load predefined alpha list...')
+                self.alpha_list = self.postprocessor_args.alpha_list
+            else:
+                print('\n Searching for optimal alpha list...')
+                # get in-distribution scores
+                for layer_index in range(self.num_layer):
+                    M_in = get_Mahalanobis_scores(
+                        net, id_loader_dict['val'], self.num_classes,
+                        self.feature_mean, self.feature_prec,
+                        self.transform_matrix, layer_index,
+                        self.feature_type_list, self.magnitude)
+                    M_in = np.asarray(M_in, dtype=np.float32)
+                    if layer_index == 0:
+                        Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
+                    else:
+                        Mahalanobis_in = np.concatenate(
+                            (Mahalanobis_in, M_in.reshape(
+                                (M_in.shape[0], -1))),
+                            axis=1)
+                # get out-of-distribution scores
+                for layer_index in range(self.num_layer):
+                    M_out = get_Mahalanobis_scores(
+                        net, ood_loader_dict['val'], self.num_classes,
+                        self.feature_mean, self.feature_prec,
+                        self.transform_matrix, layer_index,
+                        self.feature_type_list, self.magnitude)
+                    M_out = np.asarray(M_out, dtype=np.float32)
+                    if layer_index == 0:
+                        Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
+                    else:
+                        Mahalanobis_out = np.concatenate(
+                            (Mahalanobis_out,
+                             M_out.reshape((M_out.shape[0], -1))),
+                            axis=1)
+                Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
+                Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
+
+                # logistic regression for optimal alpha
+                self.alpha_list = alpha_selector(Mahalanobis_in,
+                                                 Mahalanobis_out)
+            self.setup_flag = True
+        else:
+            pass
+
+    def postprocess(self, net: nn.Module, data: Any):
+        for layer_index in range(self.num_layer):
+            pred, score = compute_Mahalanobis_score(net,
+                                                    Variable(
+                                                        data,
+                                                        requires_grad=True),
+                                                    self.num_classes,
+                                                    self.feature_mean,
+                                                    self.feature_prec,
+                                                    self.transform_matrix,
+                                                    layer_index,
+                                                    self.feature_type_list,
+                                                    self.magnitude,
+                                                    return_pred=True)
+            if layer_index == 0:
+                score_list = score.view([-1, 1])
+            else:
+                score_list = torch.cat((score_list, score.view([-1, 1])), 1)
+        alpha = torch.cuda.FloatTensor(self.alpha_list)
+        conf = torch.matmul(score_list, alpha)
+        return pred, conf
+
+    def set_hyperparam(self, hyperparam: list):
+        self.magnitude = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.magnitude
+
+
+def tensor2list(x):
+    return x.data.cpu().tolist()
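The per-class Gaussian score used throughout this file is -0.5 * (f - mu_c)^T P (f - mu_c) with a precision matrix P shared across classes; the best-scoring class provides the pseudo-prediction, and the confidence is taken after the input perturbation implemented below. A standalone sketch with random tensors:

    import torch

    f = torch.randn(1, 4)                 # one feature vector
    mu = torch.randn(3, 4)                # per-class means
    prec = torch.eye(4)                   # shared (tied) precision
    z = f - mu                            # broadcast over classes
    score = -0.5 * torch.einsum('cd,de,ce->c', z, prec, z)
    pred = score.argmax()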
+
+
+def get_torch_feature_stat(feature, only_mean=False):
+    feature = feature.view([feature.size(0), feature.size(1), -1])
+    feature_mean = torch.mean(feature, dim=-1)
+    feature_var = torch.var(feature, dim=-1)
+    if feature.size(-2) * feature.size(-1) == 1 or only_mean:
+        # [N, C, 1, 1] does not need variance for kernel
+        feature_stat = feature_mean
+    else:
+        feature_stat = torch.cat((feature_mean, feature_var), 1)
+    return feature_stat
+
+
+def process_feature_type(feature_temp, feature_type):
+    if feature_type == 'flat':
+        feature_temp = feature_temp.view([feature_temp.size(0), -1])
+    elif feature_type == 'stat':
+        feature_temp = get_torch_feature_stat(feature_temp)
+    elif feature_type == 'mean':
+        feature_temp = get_torch_feature_stat(feature_temp, only_mean=True)
+    else:
+        raise ValueError('Unknown feature type')
+    return feature_temp
+
+
+def reduce_feature_dim(feature_list_full, label_list_full, feature_process):
+    if feature_process == 'none':
+        transform_matrix = np.eye(feature_list_full.shape[1])
+    else:
+        feature_process, kept_dim = feature_process.split('_')
+        kept_dim = int(kept_dim)
+        if feature_process == 'capca':
+            lda = InverseLDA(solver='eigen')
+            lda.fit(feature_list_full, label_list_full)
+            transform_matrix = lda.scalings_[:, :kept_dim]
+        elif feature_process == 'pca':
+            pca = PCA(n_components=kept_dim)
+            pca.fit(feature_list_full)
+            transform_matrix = pca.components_.T
+        elif feature_process == 'lda':
+            lda = LinearDiscriminantAnalysis(solver='eigen')
+            lda.fit(feature_list_full, label_list_full)
+            transform_matrix = lda.scalings_[:, :kept_dim]
+        else:
+            raise Exception('Unknown Process Type')
+    return transform_matrix
+
+
+@torch.no_grad()
+def get_MDS_stat(model, train_loader, num_classes, feature_type_list,
+                 reduce_dim_list):
+    """Compute sample mean and precision (inverse of covariance).
+    return: sample_class_mean: list of class mean
+            precision: list of precisions
+            transform_matrix_list: list of transform_matrix
+    """
+    import sklearn.covariance
+    group_lasso = sklearn.covariance.EmpiricalCovariance(assume_centered=False)
+    model.eval()
+    num_layer = len(feature_type_list)
+    feature_class = [[None for x in range(num_classes)]
+                     for y in range(num_layer)]
+    feature_all = [None for x in range(num_layer)]
+    label_list = []
+    # collect features
+    for batch in tqdm(train_loader, desc='Compute mean/std'):
+        data = batch['data_aux'].cuda()
+        label = batch['label']
+        _, feature_list = model(data, return_feature_list=True)
+        label_list.extend(tensor2list(label))
+        for layer_idx in range(num_layer):
+            feature_type = feature_type_list[layer_idx]
+            feature_processed = process_feature_type(feature_list[layer_idx],
+                                                     feature_type)
+            if isinstance(feature_all[layer_idx], type(None)):
+                feature_all[layer_idx] = tensor2list(feature_processed)
+            else:
+                feature_all[layer_idx].extend(tensor2list(feature_processed))
+    label_list = np.array(label_list)
+    # reduce feature dim and split by classes
+    transform_matrix_list = []
+    for layer_idx in range(num_layer):
+        feature_sub = np.array(feature_all[layer_idx])
+        transform_matrix = reduce_feature_dim(feature_sub, label_list,
+                                              reduce_dim_list[layer_idx])
+        transform_matrix_list.append(torch.Tensor(transform_matrix).cuda())
+        feature_sub = np.dot(feature_sub, transform_matrix)
+        for feature, label in zip(feature_sub, label_list):
+            feature = feature.reshape([-1, len(feature)])
+            if isinstance(feature_class[layer_idx][label], type(None)):
+                feature_class[layer_idx][label] = feature
+            else:
+                feature_class[layer_idx][label] = np.concatenate(
+                    (feature_class[layer_idx][label], feature), axis=0)
+    # calculate feature mean
+    feature_mean_list = [[
+        np.mean(feature_by_class, axis=0)
+        for feature_by_class in feature_by_layer
+    ] for feature_by_layer in feature_class]
+
+    # calculate precision
+    precision_list = []
+    for layer in range(num_layer):
+        X = []
+        for k in range(num_classes):
+            X.append(feature_class[layer][k] - feature_mean_list[layer][k])
+        X = np.concatenate(X, axis=0)
+        # find inverse
+        group_lasso.fit(X)
+        precision = group_lasso.precision_
+        precision_list.append(precision)
+
+    # put mean and precision to cuda
+    feature_mean_list = [torch.Tensor(i).cuda() for i in feature_mean_list]
+    precision_list = [torch.Tensor(p).cuda() for p in precision_list]
+
+    return feature_mean_list, precision_list, transform_matrix_list
+
+
+def get_Mahalanobis_scores(model, test_loader, num_classes, sample_mean,
+                           precision, transform_matrix, layer_index,
+                           feature_type_list, magnitude):
+    """Compute the proposed Mahalanobis confidence score on input dataset.
+
+    return: Mahalanobis score from layer_index
+    """
+    model.eval()
+    Mahalanobis = []
+    for batch in tqdm(test_loader,
+                      desc=f'{test_loader.dataset.name}_layer{layer_index}'):
+        data = batch['data'].cuda()
+        data = Variable(data, requires_grad=True)
+        noise_gaussian_score = compute_Mahalanobis_score(
+            model, data, num_classes, sample_mean, precision, transform_matrix,
+            layer_index, feature_type_list, magnitude)
+        Mahalanobis.extend(noise_gaussian_score.cpu().numpy())
+    return Mahalanobis
+
+
+def compute_Mahalanobis_score(model,
+                              data,
+                              num_classes,
+                              sample_mean,
+                              precision,
+                              transform_matrix,
+                              layer_index,
+                              feature_type_list,
+                              magnitude,
+                              return_pred=False):
+    # extract features
+    _, out_features = model(data, return_feature_list=True)
+    out_features = process_feature_type(out_features[layer_index],
+                                        feature_type_list[layer_index])
+    out_features = torch.mm(out_features, transform_matrix[layer_index])
+
+    # compute Mahalanobis score
+    gaussian_score = 0
+    for i in range(num_classes):
+        batch_sample_mean = sample_mean[layer_index][i]
+        zero_f = out_features.data - batch_sample_mean
+        term_gau = -0.5 * torch.mm(torch.mm(zero_f, precision[layer_index]),
+                                   zero_f.t()).diag()
+        if i == 0:
+            gaussian_score = term_gau.view(-1, 1)
+        else:
+            gaussian_score = torch.cat((gaussian_score, term_gau.view(-1, 1)),
+                                       1)
+
+    # Input_processing
+    sample_pred = gaussian_score.max(1)[1]
+    batch_sample_mean = sample_mean[layer_index].index_select(0, sample_pred)
+    zero_f = out_features - Variable(batch_sample_mean)
+    pure_gau = -0.5 * torch.mm(
+        torch.mm(zero_f, Variable(precision[layer_index])), zero_f.t()).diag()
+    loss = torch.mean(-pure_gau)
+    loss.backward()
+
+    gradient = torch.ge(data.grad.data, 0)
+    gradient = (gradient.float() - 0.5) * 2
+
+    # here we use the default value of 0.5
+    gradient.index_copy_(
+        1,
+        torch.LongTensor([0]).cuda(),
+        gradient.index_select(1,
+                              torch.LongTensor([0]).cuda()) / 0.5)
+    gradient.index_copy_(
+        1,
+        torch.LongTensor([1]).cuda(),
+        gradient.index_select(1,
+                              torch.LongTensor([1]).cuda()) / 0.5)
+    gradient.index_copy_(
+        1,
+        torch.LongTensor([2]).cuda(),
+        gradient.index_select(1,
+                              torch.LongTensor([2]).cuda()) / 0.5)
+    tempInputs = torch.add(
+        data.data, gradient,
+        alpha=-magnitude)  # updated input data with perturbation
+
+    with torch.no_grad():
+        _, noise_out_features = model(Variable(tempInputs),
+                                      return_feature_list=True)
+        noise_out_features = process_feature_type(
+            noise_out_features[layer_index], feature_type_list[layer_index])
+        noise_out_features = torch.mm(noise_out_features,
+                                      transform_matrix[layer_index])
+
+    noise_gaussian_score = 0
+    for i in range(num_classes):
+        batch_sample_mean = sample_mean[layer_index][i]
+        zero_f = noise_out_features.data - batch_sample_mean
+        term_gau = -0.5 * torch.mm(torch.mm(zero_f, precision[layer_index]),
+                                   zero_f.t()).diag()
+        if i == 0:
+            noise_gaussian_score = term_gau.view(-1, 1)
+        else:
+            noise_gaussian_score = torch.cat(
+                (noise_gaussian_score, term_gau.view(-1, 1)), 1)
+
+    noise_gaussian_score, _ = torch.max(noise_gaussian_score, dim=1)
+    if return_pred:
+        return sample_pred, noise_gaussian_score
+    else:
+        return noise_gaussian_score
+
+
+def alpha_selector(data_in, data_out):
+    label_in = np.ones(len(data_in))
+    label_out = np.zeros(len(data_out))
+    data = np.concatenate([data_in, data_out])
+    label = np.concatenate([label_in, label_out])
+    # skip the last-layer flattened feature (duplicated with the last feature)
+    lr = LogisticRegressionCV(n_jobs=-1).fit(data, label)
+    alpha_list = lr.coef_.reshape(-1)
+    print(f'Optimal Alpha List: {alpha_list}')
+    return alpha_list
+
+
+def _cov(X, shrinkage=None, covariance_estimator=None):
+    """Estimate covariance matrix (using optional covariance_estimator).
+    Parameters
+    ----------
+    X : array-like of shape (n_samples, n_features)
+        Input data.
+    shrinkage : {'empirical', 'auto'} or float, default=None
+        Shrinkage parameter, possible values:
+          - None or 'empirical': no shrinkage (default).
+          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
+          - float between 0 and 1: fixed shrinkage parameter.
+        Shrinkage parameter is ignored if `covariance_estimator`
+        is not None.
+    covariance_estimator : estimator, default=None
+        If not None, `covariance_estimator` is used to estimate
+        the covariance matrices instead of relying on the empirical
+        covariance estimator (with potential shrinkage).
+        The object should have a fit method and a ``covariance_`` attribute
+        like the estimators in :mod:`sklearn.covariance`.
+        If None, the shrinkage parameter drives the estimate.
+        .. versionadded:: 0.24
+    Returns
+    -------
+    s : ndarray of shape (n_features, n_features)
+        Estimated covariance matrix.
+    """
+    if covariance_estimator is None:
+        shrinkage = 'empirical' if shrinkage is None else shrinkage
+        if isinstance(shrinkage, str):
+            if shrinkage == 'auto':
+                sc = StandardScaler()  # standardize features
+                X = sc.fit_transform(X)
+                s = ledoit_wolf(X)[0]
+                # rescale
+                s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
+            elif shrinkage == 'empirical':
+                s = empirical_covariance(X)
+            else:
+                raise ValueError('unknown shrinkage parameter')
+        elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
+            if shrinkage < 0 or shrinkage > 1:
+                raise ValueError('shrinkage parameter must be between 0 and 1')
+            s = shrunk_covariance(empirical_covariance(X), shrinkage)
+        else:
+            raise TypeError('shrinkage must be a float or a string')
+    else:
+        if shrinkage is not None and shrinkage != 0:
+            raise ValueError('covariance_estimator and shrinkage parameters '
+                             'are not None. Only one of the two can be set.')
+        covariance_estimator.fit(X)
+        if not hasattr(covariance_estimator, 'covariance_'):
+            raise ValueError('%s does not have a covariance_ attribute' %
+                             covariance_estimator.__class__.__name__)
+        s = covariance_estimator.covariance_
+    return s
+
+
+def _class_means(X, y):
+    """Compute class means.
+    Parameters
+    ----------
+    X : array-like of shape (n_samples, n_features)
+        Input data.
+    y : array-like of shape (n_samples,) or (n_samples, n_targets)
+        Target values.
+    Returns
+    -------
+    means : array-like of shape (n_classes, n_features)
+        Class means.
+    """
+    classes, y = np.unique(y, return_inverse=True)
+    cnt = np.bincount(y)
+    means = np.zeros(shape=(len(classes), X.shape[1]))
+    np.add.at(means, y, X)
+    means /= cnt[:, None]
+    return means
+
+
+def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None):
+    """Compute weighted within-class covariance matrix.
+    The per-class covariances are weighted by the class priors.
+    Parameters
+    ----------
+    X : array-like of shape (n_samples, n_features)
+        Input data.
+    y : array-like of shape (n_samples,) or (n_samples, n_targets)
+        Target values.
+    priors : array-like of shape (n_classes,)
+        Class priors.
+    shrinkage : 'auto' or float, default=None
+        Shrinkage parameter, possible values:
+        - None: no shrinkage (default).
+        - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
+        - float between 0 and 1: fixed shrinkage parameter.
+        Shrinkage parameter is ignored if `covariance_estimator` is not None.
+    covariance_estimator : estimator, default=None
+        If not None, `covariance_estimator` is used to estimate
+        the covariance matrices instead of relying on the empirical
+        covariance estimator (with potential shrinkage).
+        The object should have a fit method and a ``covariance_`` attribute
+        like the estimators in sklearn.covariance.
+        If None, the shrinkage parameter drives the estimate.
+        .. versionadded:: 0.24
+    Returns
+    -------
+    cov : array-like of shape (n_features, n_features)
+        Weighted within-class covariance matrix.
+    """
+    classes = np.unique(y)
+    cov = np.zeros(shape=(X.shape[1], X.shape[1]))
+    for idx, group in enumerate(classes):
+        Xg = X[y == group, :]
+        cov += priors[idx] * np.atleast_2d(
+            _cov(Xg, shrinkage, covariance_estimator))
+    return cov
+
+
+class InverseLDA(LinearDiscriminantAnalysis):
+    def _solve_eigen(self, X, y, shrinkage):
+        """Eigenvalue solver.
+        The eigenvalue solver computes the optimal solution of the Rayleigh
+        coefficient (basically the ratio of between class scatter to within
+        class scatter). This solver supports both classification and
+        dimensionality reduction (with optional shrinkage).
+        Parameters
+        ----------
+        X : array-like, shape (n_samples, n_features)
+            Training data.
+        y : array-like, shape (n_samples,) or (n_samples, n_targets)
+            Target values.
+        shrinkage : string or float, optional
+            Shrinkage parameter, possible values:
+            - None: no shrinkage (default).
+            - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
+            - float between 0 and 1: fixed shrinkage constant.
+        Notes
+        -----
+        This solver is based on [1]_, section 3.8.3, pp. 121-124.
+        References
+        ----------
+        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
+           (Second Edition). John Wiley & Sons, Inc., New York, 2001.
+        """
+        self.means_ = _class_means(X, y)
+        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
+
+        Sw = self.covariance_  # within scatter
+        # St = _cov(X, shrinkage)  # total scatter
+        # Sb = St - Sw  # between scatter
+
+        # Standard LDA: evals, evecs = linalg.eigh(Sb, Sw)
+        # Here we instead look for a mapping that maximizes the within-class
+        # scatter Sw while keeping the between-class scatter Sb small, i.e.
+        # a class-agnostic projection.
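+        # Editor's note: Sw is symmetric positive semi-definite, so eigh
+        # below returns real eigenvalues; sorting them in descending order
+        # keeps the directions of largest within-class variance.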
+ evals, evecs = linalg.eigh(Sw) + + self.explained_variance_ratio_ = np.sort( + evals / np.sum(evals))[::-1][:self._max_components] + evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors + + self.scalings_ = evecs + self.coef_ = np.dot(self.means_, evecs).dot(evecs.T) + self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + + np.log(self.priors_)) diff --git a/OpenOOD/openood/postprocessors/mds_postprocessor.py b/OpenOOD/openood/postprocessors/mds_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..79df5fbd7f6ae942ea8e70d8f799644aefd84e1e --- /dev/null +++ b/OpenOOD/openood/postprocessors/mds_postprocessor.py @@ -0,0 +1,79 @@ +from typing import Any +from copy import deepcopy + +import numpy as np +import torch +import torch.nn as nn +import sklearn.covariance +from tqdm import tqdm + +from .base_postprocessor import BasePostprocessor +from .info import num_classes_dict + + +class MDSPostprocessor(BasePostprocessor): + def __init__(self, config): + self.config = config + self.num_classes = num_classes_dict[self.config.dataset.name] + self.setup_flag = False + + def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict): + if not self.setup_flag: + # estimate mean and variance from training set + print('\n Estimating mean and variance from training set...') + all_feats = [] + all_labels = [] + all_preds = [] + with torch.no_grad(): + for batch in tqdm(id_loader_dict['train'], + desc='Setup: ', + position=0, + leave=True): + data, labels = batch['data'].cuda(), batch['label'] + logits, features = net(data, return_feature=True) + all_feats.append(features.cpu()) + all_labels.append(deepcopy(labels)) + all_preds.append(logits.argmax(1).cpu()) + + all_feats = torch.cat(all_feats) + all_labels = torch.cat(all_labels) + all_preds = torch.cat(all_preds) + # sanity check on train acc + train_acc = all_preds.eq(all_labels).float().mean() + print(f' Train acc: {train_acc:.2%}') + + # compute class-conditional statistics + self.class_mean = [] + centered_data = [] + for c in range(self.num_classes): + class_samples = all_feats[all_labels.eq(c)].data + self.class_mean.append(class_samples.mean(0)) + centered_data.append(class_samples - + self.class_mean[c].view(1, -1)) + + self.class_mean = torch.stack( + self.class_mean) # shape [#classes, feature dim] + + group_lasso = sklearn.covariance.EmpiricalCovariance( + assume_centered=False) + group_lasso.fit( + torch.cat(centered_data).cpu().numpy().astype(np.float32)) + # inverse of covariance + self.precision = torch.from_numpy(group_lasso.precision_).float() + self.setup_flag = True + else: + pass + + @torch.no_grad() + def postprocess(self, net: nn.Module, data: Any): + logits, features = net(data, return_feature=True) + pred = logits.argmax(1) + + class_scores = torch.zeros((logits.shape[0], self.num_classes)) + for c in range(self.num_classes): + tensor = features.cpu() - self.class_mean[c].view(1, -1) + class_scores[:, c] = -torch.matmul( + torch.matmul(tensor, self.precision), tensor.t()).diag() + + conf = torch.max(class_scores, dim=1)[0] + return pred, conf diff --git a/OpenOOD/openood/postprocessors/mos_postprocessor.py b/OpenOOD/openood/postprocessors/mos_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..73ae62a5b240771ae45ea5bb279b6d43dc61d818 --- /dev/null +++ b/OpenOOD/openood/postprocessors/mos_postprocessor.py @@ -0,0 +1,107 @@ +from __future__ import absolute_import, division, print_function + +import numpy as np +import torch +from torch 
import nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+
+
+def get_group_slices(classes_per_group):
+    group_slices = []
+    start = 0
+    for num_cls in classes_per_group:
+        # each group holds its classes plus one extra "others" logit
+        end = start + num_cls + 1
+        group_slices.append([start, end])
+        start = end
+    return torch.LongTensor(group_slices)
+
+
+def cal_ood_score(logits, group_slices):
+    num_groups = group_slices.shape[0]
+
+    all_group_ood_score_MOS = []
+
+    smax = torch.nn.Softmax(dim=-1).cuda()
+    for i in range(num_groups):
+        group_logit = logits[:, group_slices[i][0]:group_slices[i][1]]
+
+        group_softmax = smax(group_logit)
+        group_others_score = group_softmax[:, 0]
+
+        all_group_ood_score_MOS.append(-group_others_score)
+
+    all_group_ood_score_MOS = torch.stack(all_group_ood_score_MOS, dim=1)
+    final_max_score_MOS, _ = torch.max(all_group_ood_score_MOS, dim=1)
+    return final_max_score_MOS.data.cpu().numpy()
+
+
+class MOSPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(MOSPostprocessor, self).__init__(config)
+        self.config = config
+        self.setup_flag = False
+
+    def cal_group_slices(self, train_loader):
+        config = self.config
+        # if a group config file is specified, load it directly
+        if config.trainer.group_config.endswith('npy'):
+            classes_per_group = np.load(config.trainer.group_config)
+        elif config.trainer.group_config.endswith('txt'):
+            classes_per_group = np.loadtxt(config.trainer.group_config,
+                                           dtype=int)
+        else:
+            # otherwise compute the group config from the training labels
+            group = {}
+            train_dataiter = iter(train_loader)
+            for train_step in tqdm(range(1,
+                                         len(train_dataiter) + 1),
+                                   desc='cal group_config',
+                                   position=0,
+                                   leave=True):
+                batch = next(train_dataiter)
+                group_label = batch['group_label'].cuda()
+                class_label = batch['class_label'].cuda()
+
+                for i in range(len(class_label)):
+                    # collect the set of class labels seen in each group
+                    group_key = str(group_label[i].cpu().item())
+                    class_idx = class_label[i].cpu().item()
+                    group.setdefault(group_key, [])
+                    if class_idx not in group[group_key]:
+                        group[group_key].append(class_idx)
+
+            classes_per_group = []
+            for i in range(len(group)):
+                classes_per_group.append(max(group[str(i)]) + 1)
+
+        self.num_groups = len(classes_per_group)
+        self.group_slices = get_group_slices(classes_per_group)
+        self.group_slices = self.group_slices.cuda()
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        # this postprocessor does not really do anything
+        # the inference is done in the mos_evaluator
+        pass
+
+    def postprocess(self, net: nn.Module, data):
+        net.eval()
+        confs_mos = []
+        with torch.no_grad():
+            logits = net(data)
+            conf_mos = cal_ood_score(logits, self.group_slices)
+            confs_mos.extend(conf_mos)
+
+        conf = torch.tensor(confs_mos)
+        pred = logits.data.max(1)[1]
+        return pred, conf
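+
+# Editor's sketch (not part of the original file): a minimal illustration of
+# how the helpers above slice a flat MOS logit vector into per-group blocks.
+# The group sizes are invented; cal_ood_score needs CUDA because it calls
+# .cuda() internally.
+if __name__ == '__main__':
+    slices = get_group_slices([3, 5])  # two groups with 3 and 5 classes
+    # each group reserves one extra "others" logit, so blocks are 4 and 6 wide
+    print(slices.tolist())  # [[0, 4], [4, 10]]
+    if torch.cuda.is_available():
+        logits = torch.randn(2, 10).cuda()
+        print(cal_ood_score(logits, slices.cuda()))  # higher => more ID-like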
diff --git a/OpenOOD/openood/postprocessors/nnguide_postprocessor.py b/OpenOOD/openood/postprocessors/nnguide_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6347b2ba945179053fd998a4077062529333af1
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/nnguide_postprocessor.py
@@ -0,0 +1,88 @@
+from typing import Any
+
+import faiss
+import numpy as np
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+from scipy.special import logsumexp
+from copy import deepcopy
+from .base_postprocessor import BasePostprocessor
+
+# parentheses make the epsilon guard the norm, i.e. x / (||x|| + eps)
+normalizer = lambda x: x / (np.linalg.norm(x, axis=-1, keepdims=True) + 1e-10)
+
+
+def knn_score(bankfeas, queryfeas, k=100, use_min=False):
+
+    bankfeas = deepcopy(np.array(bankfeas))
+    queryfeas = deepcopy(np.array(queryfeas))
+
+    index = faiss.IndexFlatIP(bankfeas.shape[-1])
+    index.add(bankfeas)
+    D, _ = index.search(queryfeas, k)
+    if use_min:
+        scores = np.array(D.min(axis=1))
+    else:
+        scores = np.array(D.mean(axis=1))
+    return scores
+
+
+class NNGuidePostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(NNGuidePostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.K = self.args.K
+        self.alpha = self.args.alpha
+        self.activation_log = None
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            net.eval()
+            bank_feas = []
+            bank_logits = []
+            with torch.no_grad():
+                for batch in tqdm(id_loader_dict['train'],
+                                  desc='Setup: ',
+                                  position=0,
+                                  leave=True):
+                    data = batch['data'].cuda()
+                    data = data.float()
+
+                    logit, feature = net(data, return_feature=True)
+                    bank_feas.append(normalizer(feature.data.cpu().numpy()))
+                    bank_logits.append(logit.data.cpu().numpy())
+                    # stop once a fraction alpha of the train set is banked
+                    if (len(bank_feas) * id_loader_dict['train'].batch_size >
+                            int(len(id_loader_dict['train'].dataset) *
+                                self.alpha)):
+                        break
+
+            bank_feas = np.concatenate(bank_feas, axis=0)
+            bank_confs = logsumexp(np.concatenate(bank_logits, axis=0),
+                                   axis=-1)
+            self.bank_guide = bank_feas * bank_confs[:, None]
+
+            self.setup_flag = True
+        else:
+            pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        logit, feature = net(data, return_feature=True)
+        feas_norm = normalizer(feature.data.cpu().numpy())
+        energy = logsumexp(logit.data.cpu().numpy(), axis=-1)
+
+        conf = knn_score(self.bank_guide, feas_norm, k=self.K)
+        score = conf * energy
+
+        _, pred = torch.max(torch.softmax(logit, dim=1), dim=1)
+        return pred, torch.from_numpy(score)
+
+    def set_hyperparam(self, hyperparam: list):
+        self.K = hyperparam[0]
+        self.alpha = hyperparam[1]
+
+    def get_hyperparam(self):
+        return [self.K, self.alpha]
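+
+# Editor's sketch (not part of the original file): knn_score on toy data.
+# With inner-product search over L2-normalized vectors, larger scores mean
+# closer neighbors; the shapes and seed here are invented for illustration.
+if __name__ == '__main__':
+    rng = np.random.default_rng(0)
+    bank = normalizer(rng.normal(size=(1000, 16)).astype(np.float32))
+    query = normalizer(rng.normal(size=(4, 16)).astype(np.float32))
+    print(knn_score(bank, query, k=10))  # mean of top-10 cosine similarities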
diff --git a/OpenOOD/openood/postprocessors/npos_postprocessor.py b/OpenOOD/openood/postprocessors/npos_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..6235bc18c0dbb1d4b74a6960889240a161d94cb8
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/npos_postprocessor.py
@@ -0,0 +1,59 @@
+from typing import Any
+
+import faiss
+import numpy as np
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+
+
+class NPOSPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(NPOSPostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.K = self.args.K
+        self.activation_log = None
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            activation_log = []
+            net.eval()
+            with torch.no_grad():
+                for batch in tqdm(id_loader_dict['train'],
+                                  desc='Setup: ',
+                                  position=0,
+                                  leave=True):
+                    data = batch['data'].cuda()
+
+                    feature = net.intermediate_forward(data)
+                    activation_log.append(feature.data.cpu().numpy())
+
+            self.activation_log = np.concatenate(activation_log, axis=0)
+            self.index = faiss.IndexFlatL2(feature.shape[1])
+            self.index.add(self.activation_log)
+            self.setup_flag = True
+        else:
+            pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        feature = net.intermediate_forward(data)
+        D, _ = self.index.search(
+            feature.cpu().numpy(),  # feature is already normalized within net
+            self.K,
+        )
+        kth_dist = -D[:, -1]
+        # put dummy prediction here
+        # as cider only trains the feature extractor
+        pred = torch.zeros(len(kth_dist))
+        return pred, torch.from_numpy(kth_dist)
+
+    def set_hyperparam(self, hyperparam: list):
+        self.K = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.K
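+
+# Editor's sketch (not part of the original file): the score above is the
+# negative k-th nearest-neighbor distance in feature space. A toy run with
+# random features shows the faiss call pattern without any network.
+if __name__ == '__main__':
+    rng = np.random.default_rng(0)
+    bank = rng.normal(size=(500, 8)).astype(np.float32)
+    index = faiss.IndexFlatL2(8)
+    index.add(bank)
+    D, _ = index.search(bank[:3], 50)
+    print(-D[:, -1])  # less negative => denser neighborhood => more ID-like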
diff --git a/OpenOOD/openood/postprocessors/odin_postprocessor.py b/OpenOOD/openood/postprocessors/odin_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d36745feed9ccbb5ac53990d121a2d660367af0
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/odin_postprocessor.py
@@ -0,0 +1,89 @@
+"""Adapted from: https://github.com/facebookresearch/odin."""
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+from .base_postprocessor import BasePostprocessor
+from openood.preprocessors.transform import normalization_dict
+
+
+class ODINPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super().__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+
+        self.temperature = self.args.temperature
+        self.noise = self.args.noise
+        try:
+            self.input_std = normalization_dict[self.config.dataset.name][1]
+        except KeyError:
+            self.input_std = [0.5, 0.5, 0.5]
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+
+    def postprocess(self, net: nn.Module, data: Any):
+        data.requires_grad = True
+        output = net(data)
+
+        # Calculating the perturbation we need to add, that is,
+        # the sign of gradient of cross entropy loss w.r.t. input
+        criterion = nn.CrossEntropyLoss()
+
+        labels = output.detach().argmax(axis=1)
+
+        # Using temperature scaling
+        output = output / self.temperature
+
+        loss = criterion(output, labels)
+        loss.backward()
+
+        # Normalizing the gradient to binary in {0, 1}
+        gradient = torch.ge(data.grad.detach(), 0)
+        gradient = (gradient.float() - 0.5) * 2
+
+        # Scaling values taken from original code
+        gradient[:, 0] = (gradient[:, 0]) / self.input_std[0]
+        gradient[:, 1] = (gradient[:, 1]) / self.input_std[1]
+        gradient[:, 2] = (gradient[:, 2]) / self.input_std[2]
+
+        # Adding small perturbations to images
+        tempInputs = torch.add(data.detach(), gradient, alpha=-self.noise)
+        output = net(tempInputs)
+        output = output / self.temperature
+
+        # Calculating the confidence after adding perturbations
+        nnOutput = output.detach()
+        nnOutput = nnOutput - nnOutput.max(dim=1, keepdims=True).values
+        nnOutput = nnOutput.exp() / nnOutput.exp().sum(dim=1, keepdims=True)
+
+        conf, pred = nnOutput.max(dim=1)
+
+        return pred, conf
+
+    def set_hyperparam(self, hyperparam: list):
+        self.temperature = hyperparam[0]
+        self.noise = hyperparam[1]
+
+    def get_hyperparam(self):
+        return [self.temperature, self.noise]
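+
+# Editor's sketch (not part of the original file): the ODIN recipe on a toy
+# linear "net". The model, temperature and noise magnitude are invented;
+# only the pattern (temperature scaling + one gradient-sign step against the
+# predicted label) mirrors the postprocessor above.
+if __name__ == '__main__':
+    torch.manual_seed(0)
+    toy_net = nn.Linear(8, 3)
+    x = torch.randn(4, 8, requires_grad=True)
+    out = toy_net(x) / 1000.0  # temperature scaling
+    loss = nn.CrossEntropyLoss()(out, out.detach().argmax(1))
+    loss.backward()
+    x_pert = x.detach() - 0.0014 * torch.sign(x.grad)  # perturbation step
+    conf = torch.softmax(toy_net(x_pert) / 1000.0, dim=1).max(1).values
+    print(conf)  # ID inputs tend to gain more confidence than OOD ones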
diff --git a/OpenOOD/openood/postprocessors/opengan_postprocessor.py b/OpenOOD/openood/postprocessors/opengan_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..5989dd3aea8e1ecd0c74a7f53a2f866d396a822d
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/opengan_postprocessor.py
@@ -0,0 +1,32 @@
+from typing import Any
+
+import torch
+
+from .base_postprocessor import BasePostprocessor
+
+
+class OpenGanPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(OpenGanPostprocessor, self).__init__(config)
+
+    @torch.no_grad()
+    def postprocess(self, net, data: Any):
+        # images input
+        if data.shape[-1] > 1 and data.shape[1] == 3:
+            output = net['backbone'](data)
+            score = torch.softmax(output, dim=1)
+            _, pred = torch.max(score, dim=1)
+
+            _, feats = net['backbone'](data, return_feature=True)
+            feats = feats.unsqueeze_(-1).unsqueeze_(-1)
+            predConf = net['netD'](feats)
+            predConf = predConf.view(-1, 1)
+            conf = predConf.reshape(-1).detach().cpu()
+        # feature input of shape [N, C, 1, 1]
+        elif data.shape[-1] == 1 and data.shape[-2] == 1:
+            predConf = net['netD'](data)
+            predConf = predConf.view(-1, 1)
+            conf = predConf.reshape(-1).detach().cpu()
+            pred = torch.ones_like(conf)  # dummy predictions
+
+        return pred, conf
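+
+# Editor's sketch (not part of the original file): the discriminator path
+# above expects features shaped [N, C, 1, 1]. The stand-in discriminator
+# below is invented purely to illustrate the reshaping.
+if __name__ == '__main__':
+    import torch.nn as nn
+    netD = nn.Sequential(nn.Conv2d(16, 1, 1), nn.Sigmoid())
+    feats = torch.randn(4, 16).unsqueeze(-1).unsqueeze(-1)  # [4, 16, 1, 1]
+    print(netD(feats).view(-1))  # one "realness" confidence per sample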
diff --git a/OpenOOD/openood/postprocessors/openmax_postprocessor.py b/OpenOOD/openood/postprocessors/openmax_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f0671760fd7853a63658b679cc3bb76143a2952
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/openmax_postprocessor.py
@@ -0,0 +1,225 @@
+import libmr
+import numpy as np
+import scipy.spatial.distance as spd
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+from .info import num_classes_dict
+
+
+class OpenMax(BasePostprocessor):
+    def __init__(self, config):
+        super(OpenMax, self).__init__(config)
+        self.nc = num_classes_dict[config.dataset.name]
+        self.weibull_alpha = 3
+        self.weibull_threshold = 0.9
+        self.weibull_tail = 20
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            # Fit the Weibull distribution from training data.
+            print('Fitting Weibull distribution...')
+            _, mavs, dists = compute_train_score_and_mavs_and_dists(
+                self.nc, id_loader_dict['train'], device='cuda', net=net)
+            categories = list(range(0, self.nc))
+            self.weibull_model = fit_weibull(mavs, dists, categories,
+                                             self.weibull_tail, 'euclidean')
+            self.setup_flag = True
+        else:
+            pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data):
+        net.eval()
+        scores = net(data).cpu().numpy()
+        scores = np.array(scores)[:, np.newaxis, :]
+        categories = list(range(0, self.nc))
+
+        pred_openmax = []
+        score_openmax = []
+        for score in scores:
+            so, _ = openmax(self.weibull_model, categories, score, 0.5,
+                            self.weibull_alpha,
+                            'euclidean')  # openmax_prob, softmax_prob
+            pred_openmax.append(
+                np.argmax(so) if np.max(so) >= self.weibull_threshold else (
+                    self.nc - 1))
+
+            score_openmax.append(so)
+
+        pred = torch.tensor(pred_openmax)
+        conf = -1 * torch.from_numpy(np.array(score_openmax))[:, -1]
+
+        return pred, conf
+
+
+def compute_channel_distances(mavs, features, eu_weight=0.5):
+    """
+    Input:
+        mavs (channel, C)
+        features: (N, channel, C)
+    Output:
+        channel_distances: dict of distance distribution from MAV
+        for each channel.
+    """
+    eucos_dists, eu_dists, cos_dists = [], [], []
+    for channel, mcv in enumerate(mavs):  # Compute channel specific distances
+        eu_dists.append(
+            [spd.euclidean(mcv, feat[channel]) for feat in features])
+        cos_dists.append([spd.cosine(mcv, feat[channel]) for feat in features])
+        eucos_dists.append([
+            spd.euclidean(mcv, feat[channel]) * eu_weight +
+            spd.cosine(mcv, feat[channel]) for feat in features
+        ])
+
+    return {
+        'eucos': np.array(eucos_dists),
+        'cosine': np.array(cos_dists),
+        'euclidean': np.array(eu_dists)
+    }
+
+
+def compute_train_score_and_mavs_and_dists(train_class_num, trainloader,
+                                           device, net):
+    scores = [[] for _ in range(train_class_num)]
+
+    train_dataiter = iter(trainloader)
+    with torch.no_grad():
+        for train_step in tqdm(range(1,
+                                     len(train_dataiter) + 1),
+                               desc='Progress: ',
+                               position=0,
+                               leave=True):
+            batch = next(train_dataiter)
+            data = batch['data'].cuda()
+            target = batch['label'].cuda()
+
+            # NOTE: this step may raise an error for cifar
+            outputs = net(data)
+            for score, t in zip(outputs, target):
+                if torch.argmax(score) == t:
+                    scores[t].append(score.unsqueeze(dim=0).unsqueeze(dim=0))
+
+    scores = [torch.cat(x).cpu().numpy() for x in scores]  # (N_c, 1, C) * C
+    mavs = np.array([np.mean(x, axis=0) for x in scores])  # (C, 1, C)
+    dists = [
+        compute_channel_distances(mcv, score)
+        for mcv, score in zip(mavs, scores)
+    ]
+    return scores, mavs, dists
+
+
+def fit_weibull(means, dists, categories, tailsize=20, distance_type='eucos'):
+    """
+    Input:
+        means (C, channel, C)
+        dists (N_c, channel, C) * C
+    Output:
+        weibull_model : Perform EVT based analysis using tails of distances
+        and save weibull model parameters for re-adjusting
+        softmax scores
+    """
+    weibull_model = {}
+    for mean, dist, category_name in zip(means, dists, categories):
+        weibull_model[category_name] = {}
+        weibull_model[category_name]['distances_{}'.format(
+            distance_type)] = dist[distance_type]
+        weibull_model[category_name]['mean_vec'] = mean
+        weibull_model[category_name]['weibull_model'] = []
+        for channel in range(mean.shape[0]):
+            mr = libmr.MR()
+            tailtofit = np.sort(dist[distance_type][channel, :])[-tailsize:]
+            mr.fit_high(tailtofit, len(tailtofit))
+            weibull_model[category_name]['weibull_model'].append(mr)
+
+    return weibull_model
+
+
+def compute_openmax_prob(scores, scores_u):
+    prob_scores, prob_unknowns = [], []
+    for s, su in zip(scores, scores_u):
+        channel_scores = np.exp(s)
+        channel_unknown = np.exp(np.sum(su))
+
+        total_denom = np.sum(channel_scores) + channel_unknown
+        prob_scores.append(channel_scores / total_denom)
+        prob_unknowns.append(channel_unknown / total_denom)
+
+    # Take channel mean
+    scores = np.mean(prob_scores, axis=0)
+    unknowns = np.mean(prob_unknowns, axis=0)
+    modified_scores = scores.tolist() + [unknowns]
+    return modified_scores
+
+
+def query_weibull(category_name, weibull_model, distance_type='eucos'):
+    return [
+        weibull_model[category_name]['mean_vec'],
+        weibull_model[category_name]['distances_{}'.format(distance_type)],
+        weibull_model[category_name]['weibull_model']
+    ]
+
+
+def calc_distance(query_score, mcv, eu_weight, distance_type='eucos'):
+    if distance_type == 'eucos':
+        query_distance = spd.euclidean(mcv, query_score) * eu_weight + \
+            spd.cosine(mcv, query_score)
+    elif distance_type == 'euclidean':
+        query_distance = spd.euclidean(mcv, query_score)
+    elif distance_type == 'cosine':
+        query_distance = spd.cosine(mcv, query_score)
+    else:
+        raise ValueError('distance type not known: enter either of eucos, '
+                         'euclidean or cosine')
+    return query_distance
+
+
+def softmax(x):
+    e_x = np.exp(x - np.max(x))
+    return e_x / e_x.sum()
+
+
+def openmax(weibull_model,
+            categories,
+            input_score,
+            eu_weight,
+            alpha=10,
+            distance_type='eucos'):
+    """Re-calibrate scores via OpenMax layer
+    Output:
+        openmax probability and softmax probability
+    """
+    nb_classes = len(categories)
+
+    ranked_list = input_score.argsort().ravel()[::-1][:alpha]
+    alpha_weights = [((alpha + 1) - i) / float(alpha)
+                     for i in range(1, alpha + 1)]
+    omega = np.zeros(nb_classes)
+    omega[ranked_list] = alpha_weights
+
+    scores, scores_u = [], []
+    for channel, input_score_channel in enumerate(input_score):
+        score_channel, score_channel_u = [], []
+        for c, category_name in enumerate(categories):
+            mav, dist, model = query_weibull(category_name, weibull_model,
+                                             distance_type)
+            channel_dist = calc_distance(input_score_channel, mav[channel],
+                                         eu_weight, distance_type)
+            wscore = model[channel].w_score(channel_dist)
+            modified_score = input_score_channel[c] * (1 - wscore * omega[c])
+            score_channel.append(modified_score)
+            score_channel_u.append(input_score_channel[c] - modified_score)
+
+        scores.append(score_channel)
+        scores_u.append(score_channel_u)
+
+    scores = np.asarray(scores)
+    scores_u = np.asarray(scores_u)
+
+    openmax_prob = np.array(compute_openmax_prob(scores, scores_u))
+    softmax_prob = softmax(np.array(input_score.ravel()))
+    return openmax_prob, softmax_prob
diff --git a/OpenOOD/openood/postprocessors/patchcore_postprocessor.py b/OpenOOD/openood/postprocessors/patchcore_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf021c2e19c1312877c6dee817eaa2a09e2d3941
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/patchcore_postprocessor.py
@@ -0,0 +1,346 @@
+from __future__ import absolute_import, division, print_function
+
+import abc
+import os
+
+import faiss
+import numpy as np
+import torch
+from sklearn.metrics import pairwise_distances
+from sklearn.random_projection import SparseRandomProjection
+from torch import nn
+from torch.nn import functional as F
+from tqdm import tqdm
+
+from
.base_postprocessor import BasePostprocessor + + +def embedding_concat(x, y): + B, C1, H1, W1 = x.size() + _, C2, H2, W2 = y.size() + s = int(H1 / H2) + x = F.unfold(x, kernel_size=s, dilation=1, stride=s) + x = x.view(B, C1, -1, H2, W2) + z = torch.zeros(B, C1 + C2, x.size(2), H2, W2) + for i in range(x.size(2)): + z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1) + z = z.view(B, -1, H2 * W2) + z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s) + return z + + +def reshape_embedding(embedding): + embedding_list = [] + for k in range(embedding.shape[0]): + for i in range(embedding.shape[2]): + for j in range(embedding.shape[3]): + embedding_list.append(embedding[k, :, i, j]) + return embedding_list + + +class PatchcorePostprocessor(BasePostprocessor): + def __init__(self, config): + super(PatchcorePostprocessor, self).__init__(config) + self.config = config + self.postprocessor_args = config.postprocessor.postprocessor_args + self.n_neighbors = config.postprocessor.postprocessor_args.n_neighbors + self.feature_mean, self.feature_prec = None, None + self.alpha_list = None + self.gt_list_px_lvl = [] + self.pred_list_px_lvl = [] + self.gt_list_img_lvl = [] + self.pred_list_img_lvl = [] + self.img_path_list = [] + self.features = [] + + def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict): + # step 1: + self.model = net + # on train start + self.model.eval() # to stop running_var move (maybe not critical) + self.embedding_list = [] + + if (self.config.network.load_cached_faiss): + path = self.config.output_dir + # load index + if os.path.isfile(os.path.join(path, 'index.faiss')): + self.index = faiss.read_index(os.path.join( + path, 'index.faiss')) + if torch.cuda.is_available(): + res = faiss.StandardGpuResources() + self.index = faiss.index_cpu_to_gpu(res, 0, self.index) + self.init_results_list() + return + + # training step + train_dataiter = iter(id_loader_dict['train']) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + position=0, + leave=True): + batch = next(train_dataiter) + x = batch['data'].cuda() + features = self.model.forward(x, return_feature=True) + embeddings = [] + for feature in features: + m = torch.nn.AvgPool2d(9, 1, 1) + embeddings.append(m(feature)) + embedding = embedding_concat(embeddings[0], embeddings[1]) + self.embedding_list.extend(reshape_embedding(np.array(embedding))) + + # training end + total_embeddings = np.array(self.embedding_list) + + # Random projection + print('Random projection') + self.randomprojector = SparseRandomProjection( + n_components='auto', + eps=0.9) # 'auto' => Johnson-Lindenstrauss lemma + self.randomprojector.fit(total_embeddings) + # Coreset Subsampling + print('Coreset Subsampling') + selector = kCenterGreedy(total_embeddings, 0, 0) + selected_idx = selector.select_batch( + model=self.randomprojector, + already_selected=[], + N=int(total_embeddings.shape[0] * + self.postprocessor_args.coreset_sampling_ratio)) + self.embedding_coreset = total_embeddings[selected_idx] + + print('initial embedding size : ', total_embeddings.shape) + print('final embedding size : ', self.embedding_coreset.shape) + # faiss + print('faiss indexing') + self.index = faiss.IndexFlatL2(self.embedding_coreset.shape[1]) + self.index.add(self.embedding_coreset) + if not os.path.isdir(os.path.join('./results/patch/')): + os.mkdir('./results/patch/') + faiss.write_index(self.index, + os.path.join('./results/patch/', 'index.faiss')) + + def init_results_list(self): + self.gt_list_px_lvl = [] + self.pred_list_px_lvl = [] + 
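# image-level counterparts of the pixel-level buffers above
+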
self.gt_list_img_lvl = [] + self.pred_list_img_lvl = [] + + def postprocess(self, net: nn.Module, data): + + self.init_results_list() + score_patch = [] + # extract embedding + for x in data.split(1, dim=0): + features = self.model.forward(x, return_feature=True) + embeddings = [] + for feature in features: + m = torch.nn.AvgPool2d(3, 1, 1) + embeddings.append(m(feature)) + embedding_ = embedding_concat(embeddings[0], embeddings[1]) + embedding_test = np.array(reshape_embedding(np.array(embedding_))) + score_patches, _ = self.index.search(embedding_test, + k=self.n_neighbors) + + score_patch.append(score_patches) + + N_b = score_patches[np.argmax(score_patches[:, 0])] + w = (1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b)))) + score = w * max(score_patches[:, 0]) # Image-level score + + self.pred_list_img_lvl.append(score) + + pred = [] + for i in self.pred_list_img_lvl: + # 6.3 is the trial value. + if (i > 6.3): + pred.append(torch.tensor(1)) + else: + pred.append(torch.tensor(-1)) + conf = [] + for i in score_patch: + conf.append(i) + conf = torch.tensor(conf, dtype=torch.float32) + conf = conf.cuda() + + pred_list_img_lvl = [] + + for patchscore in np.concatenate([conf.cpu().tolist()]): + N_b = patchscore[np.argmax(patchscore[:, 0])] + w = (1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b)))) + score = w * max(patchscore[:, 0]) # Image-level score + + pred_list_img_lvl.append(score) + + if self.config.evaluator.name == 'patch': + return pred, conf + else: + return pred, -1 * torch.tensor(pred_list_img_lvl).cuda() + + +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Abstract class for sampling methods. + +Provides interface to sampling methods that allow same signature for +select_batch. Each subclass implements select_batch_ with the desired +signature for readability. +""" + + +class SamplingMethod(object): + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def __init__(self, X, y, seed, **kwargs): + self.X = X + self.y = y + self.seed = seed + + def flatten_X(self): + shape = self.X.shape + flat_X = self.X + if len(shape) > 2: + flat_X = np.reshape(self.X, (shape[0], np.product(shape[1:]))) + return flat_X + + @abc.abstractmethod + def select_batch_(self): + return + + def select_batch(self, **kwargs): + return self.select_batch_(**kwargs) + + def to_dict(self): + return None + + +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Returns points that minimizes the maximum distance of any point to a center. 
+
+Implements the k-Center-Greedy method in
+Ozan Sener and Silvio Savarese. A Geometric Approach to Active Learning for
+Convolutional Neural Networks. https://arxiv.org/abs/1708.00489 2017
+
+Distance metric defaults to l2 distance. Features used to calculate distance
+are either raw features or if a model has transform method then uses the output
+of model.transform(X).
+
+Can be extended to a robust k centers algorithm that ignores a certain number
+of outlier datapoints. Resulting centers are solution to multiple integer
+program.
+"""
+
+
+class kCenterGreedy(SamplingMethod):
+    def __init__(self, X, y, seed, metric='euclidean'):
+        self.X = X
+        self.y = y
+        self.flat_X = self.flatten_X()
+        self.name = 'kcenter'
+        self.features = self.flat_X
+        self.metric = metric
+        self.min_distances = None
+        self.n_obs = self.X.shape[0]
+        self.already_selected = []
+
+    def update_distances(self,
+                         cluster_centers,
+                         only_new=True,
+                         reset_dist=False):
+        """Update min distances given cluster centers.
+
+        Args:
+            cluster_centers: indices of cluster centers
+            only_new: only calculate distance for newly selected points and
+                update min_distances.
+            reset_dist: whether to reset min_distances.
+        """
+
+        if reset_dist:
+            self.min_distances = None
+        if only_new:
+            cluster_centers = [
+                d for d in cluster_centers if d not in self.already_selected
+            ]
+        if cluster_centers:
+            # Update min_distances for all examples given new cluster center.
+            x = self.features[cluster_centers]
+            dist = pairwise_distances(self.features, x, metric=self.metric)
+
+            if self.min_distances is None:
+                self.min_distances = np.min(dist, axis=1).reshape(-1, 1)
+            else:
+                self.min_distances = np.minimum(self.min_distances, dist)
+
+    def select_batch_(self, model, already_selected, N, **kwargs):
+        """Diversity promoting active learning method that greedily forms a
+        batch to minimize the maximum distance to a cluster center among all
+        unlabeled datapoints.
+
+        Args:
+            model: model with scikit-like API with decision_function implemented
+            already_selected: index of datapoints already selected
+            N: batch size
+
+        Returns:
+            indices of points selected to minimize distance to cluster centers
+        """
+
+        try:
+            # Assumes that the transform function takes in original data and
+            # not flattened data.
+            print('Getting transformed features...')
+            self.features = model.transform(self.X)
+            print('Calculating distances...')
+            self.update_distances(already_selected,
+                                  only_new=False,
+                                  reset_dist=True)
+        except Exception:
+            print('Using flat_X as features.')
+            self.update_distances(already_selected,
+                                  only_new=True,
+                                  reset_dist=False)
+
+        new_batch = []
+
+        for _ in tqdm(range(N)):
+            if self.already_selected is None:
+                # Initialize centers with a randomly selected datapoint
+                ind = np.random.choice(np.arange(self.n_obs))
+            else:
+                ind = np.argmax(self.min_distances)
+            # New examples should not be in already selected since those points
+            # should have min_distance of zero to a cluster center.
+ assert ind not in already_selected + + self.update_distances([ind], only_new=True, reset_dist=False) + new_batch.append(ind) + print('Maximum distance from cluster centers is %0.2f' % + max(self.min_distances)) + + self.already_selected = already_selected + + return new_batch diff --git a/OpenOOD/openood/postprocessors/rankfeat_postprocessor.py b/OpenOOD/openood/postprocessors/rankfeat_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..b48ce9a9f16728170070e5cd160748d6d32a3346 --- /dev/null +++ b/OpenOOD/openood/postprocessors/rankfeat_postprocessor.py @@ -0,0 +1,72 @@ +from typing import Any + +import torch +import torch.nn as nn + +from .base_postprocessor import BasePostprocessor + + +class RankFeatPostprocessor(BasePostprocessor): + def __init__(self, config): + super(RankFeatPostprocessor, self).__init__(config) + self.config = config + self.args = self.config.postprocessor.postprocessor_args + + @torch.no_grad() + def postprocess(self, net: nn.Module, data: Any): + inputs = data.cuda() + + # Logit of Block 4 feature + feat1 = net.intermediate_forward(inputs, layer_index=4) + B, C, H, W = feat1.size() + feat1 = feat1.view(B, C, H * W) + if self.args.accelerate: + feat1 = feat1 - power_iteration(feat1, iter=20) + else: + u, s, v = torch.linalg.svd(feat1, full_matrices=False) + feat1 = feat1 - s[:, 0:1].unsqueeze(2) * u[:, :, 0:1].bmm( + v[:, 0:1, :]) + feat1 = feat1.view(B, C, H, W) + logits1 = net.fc(torch.flatten(net.avgpool(feat1), 1)) + + # Logit of Block 3 feature + feat2 = net.intermediate_forward(inputs, layer_index=3) + B, C, H, W = feat2.size() + feat2 = feat2.view(B, C, H * W) + if self.args.accelerate: + feat2 = feat2 - power_iteration(feat2, iter=20) + else: + u, s, v = torch.linalg.svd(feat2, full_matrices=False) + feat2 = feat2 - s[:, 0:1].unsqueeze(2) * u[:, :, 0:1].bmm( + v[:, 0:1, :]) + feat2 = feat2.view(B, C, H, W) + feat2 = net.layer4(feat2) + logits2 = net.fc(torch.flatten(net.avgpool(feat2), 1)) + + # Fusion at the logit space + logits = (logits1 + logits2) / 2 + conf = self.args.temperature * torch.logsumexp( + logits / self.args.temperature, dim=1) + + _, pred = torch.max(logits, dim=1) + return pred, conf + + +def _l2normalize(v, eps=1e-10): + return v / (torch.norm(v, dim=2, keepdim=True) + eps) + + +# Power Iteration as SVD substitute for acceleration +def power_iteration(A, iter=20): + u = torch.FloatTensor(1, A.size(1)).normal_(0, 1).view( + 1, 1, A.size(1)).repeat(A.size(0), 1, 1).to(A) + v = torch.FloatTensor(A.size(2), + 1).normal_(0, 1).view(1, A.size(2), + 1).repeat(A.size(0), 1, + 1).to(A) + for _ in range(iter): + v = _l2normalize(u.bmm(A)).transpose(1, 2) + u = _l2normalize(A.bmm(v).transpose(1, 2)) + sigma = u.bmm(A).bmm(v) + sub = sigma * u.transpose(1, 2).bmm(v.transpose(1, 2)) + return sub diff --git a/OpenOOD/openood/postprocessors/rd4ad_postprocessor.py b/OpenOOD/openood/postprocessors/rd4ad_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..9fda586d7eb1c2f106862ec140174ce63f10690e --- /dev/null +++ b/OpenOOD/openood/postprocessors/rd4ad_postprocessor.py @@ -0,0 +1,96 @@ +from typing import Any + +import numpy as np +import torch +import torch.nn as nn +from scipy.ndimage import gaussian_filter +from torch.nn import functional as F + +from .base_postprocessor import BasePostprocessor + + +class Rd4adPostprocessor(BasePostprocessor): + def __init__(self, config): + super(Rd4adPostprocessor, self).__init__(config) + + def setup(self, net: nn.Module, id_loader_dict, 
ood_loader_dict):
+        pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        encoder = net['encoder']
+        bn = net['bn']
+        decoder = net['decoder']
+        feature_list = encoder.forward(data, return_feature_list=True)[1]
+        input = feature_list[1:4]
+        en_feature1 = input[0].cpu().numpy().tolist()
+        en_feature2 = input[1].cpu().numpy().tolist()
+        en_feature3 = input[2].cpu().numpy().tolist()
+        output = decoder(bn(input))
+        de_feature1 = output[0].cpu().numpy().tolist()
+        de_feature2 = output[1].cpu().numpy().tolist()
+        de_feature3 = output[2].cpu().numpy().tolist()
+        conf_list = []
+        for i in range(data.shape[0]):
+            feature_list_en = []
+            feature_list_de = []
+            feature_list_en.append(en_feature1[i])
+            feature_list_en.append(en_feature2[i])
+            feature_list_en.append(en_feature3[i])
+            feature_list_de.append(de_feature1[i])
+            feature_list_de.append(de_feature2[i])
+            feature_list_de.append(de_feature3[i])
+            anomaly_map, _ = cal_anomaly_map(feature_list_en,
+                                             feature_list_de,
+                                             data.shape[-1],
+                                             amap_mode='a')
+            anomaly_map = gaussian_filter(anomaly_map, sigma=4)
+            conf = np.max(anomaly_map)
+            conf_list.append(-conf)
+        return -1 * torch.ones(data.shape[0]), torch.tensor(
+            [conf_list]).reshape((data.shape[0]))
+
+
+def cal_anomaly_map(fs_list, ft_list, out_size=224, amap_mode='mul'):
+    if amap_mode == 'mul':
+        anomaly_map = np.ones([out_size, out_size])
+    else:
+        anomaly_map = np.zeros([out_size, out_size])
+    a_map_list = []
+    for i in range(len(ft_list)):
+        fs = torch.Tensor([fs_list[i]])
+        ft = torch.Tensor([ft_list[i]])
+
+        a_map = 1 - F.cosine_similarity(fs, ft)
+        a_map = torch.unsqueeze(a_map, dim=1)
+        a_map = F.interpolate(a_map,
+                              size=out_size,
+                              mode='bilinear',
+                              align_corners=True)
+        a_map = a_map[0, 0, :, :].to('cpu').detach().numpy()
+        a_map_list.append(a_map)
+        if amap_mode == 'mul':
+            anomaly_map *= a_map
+        else:
+            anomaly_map += a_map
+    return anomaly_map, a_map_list
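+
+# Editor's sketch (not part of the original file): cal_anomaly_map compares
+# encoder/decoder feature pyramids via cosine similarity. The random toy
+# features below only illustrate the expected input shapes (a list of
+# [C, H, W] feature maps, given as nested lists).
+if __name__ == '__main__':
+    rng = np.random.default_rng(0)
+    fs = [rng.normal(size=(8, 16, 16)).tolist(),
+          rng.normal(size=(8, 8, 8)).tolist()]
+    ft = [rng.normal(size=(8, 16, 16)).tolist(),
+          rng.normal(size=(8, 8, 8)).tolist()]
+    amap, _ = cal_anomaly_map(fs, ft, out_size=32, amap_mode='a')
+    print(amap.shape)  # (32, 32); larger values mark reconstruction mismatch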
diff --git a/OpenOOD/openood/postprocessors/react_postprocessor.py b/OpenOOD/openood/postprocessors/react_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..990669e4bb0ef7d90bc1d99ec5da7844ff74ca27
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/react_postprocessor.py
@@ -0,0 +1,58 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+
+
+class ReactPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        super(ReactPostprocessor, self).__init__(config)
+        self.args = self.config.postprocessor.postprocessor_args
+        self.percentile = self.args.percentile
+        self.args_dict = self.config.postprocessor.postprocessor_sweep
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            activation_log = []
+            net.eval()
+            with torch.no_grad():
+                for batch in tqdm(id_loader_dict['val'],
+                                  desc='Setup: ',
+                                  position=0,
+                                  leave=True):
+                    data = batch['data'].cuda()
+                    data = data.float()
+
+                    _, feature = net(data, return_feature=True)
+                    activation_log.append(feature.data.cpu().numpy())
+
+            self.activation_log = np.concatenate(activation_log, axis=0)
+            self.setup_flag = True
+        else:
+            pass
+
+        self.threshold = np.percentile(self.activation_log.flatten(),
+                                       self.percentile)
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output = net.forward_threshold(data, self.threshold)
+        score = torch.softmax(output, dim=1)
+        _, pred = torch.max(score, dim=1)
+        energyconf = torch.logsumexp(output.data.cpu(), dim=1)
+        return pred, energyconf
+
+    def set_hyperparam(self, hyperparam: list):
+        self.percentile = hyperparam[0]
+        self.threshold = np.percentile(self.activation_log.flatten(),
+                                       self.percentile)
+        print('Threshold at percentile {:2d} over id data is: {}'.format(
+            self.percentile, self.threshold))
+
+    def get_hyperparam(self):
+        return self.percentile
diff --git a/OpenOOD/openood/postprocessors/relation_postprocessor.py b/OpenOOD/openood/postprocessors/relation_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..02910ea3da9ad60bf27cea3c67a965eb7eb5831d
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/relation_postprocessor.py
@@ -0,0 +1,131 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn as nn
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+from math import ceil
+"""Code borrowed from https://github.com/snu-mllab/Neural-Relation-Graph."""
+
+
+def normalize(feat, nc=50000):
+    with torch.no_grad():
+        split = ceil(len(feat) / nc)
+        for i in range(split):
+            feat_ = feat[i * nc:(i + 1) * nc]
+            feat[i * nc:(i + 1) *
+                 nc] = feat_ / torch.sqrt((feat_**2).sum(-1) +
+                                          1e-10).reshape(-1, 1)
+
+    return feat
+
+
+def kernel(feat, feat_t, prob, prob_t, split=2):
+    """Kernel function (assumes features are normalized)."""
+    size = ceil(len(feat_t) / split)
+    rel_full = []
+    for i in range(split):
+        feat_t_ = feat_t[i * size:(i + 1) * size]
+        prob_t_ = prob_t[i * size:(i + 1) * size]
+
+        with torch.no_grad():
+            dot = torch.matmul(feat, feat_t_.transpose(1, 0))
+            dot = torch.clamp(dot, min=0.)
+
+            sim = torch.matmul(prob, prob_t_.transpose(1, 0))
+            rel = dot * sim
+
+        rel_full.append(rel)
+
+    rel_full = torch.cat(rel_full, dim=-1)
+    return rel_full
+
+
+def get_relation(feat, feat_t, prob, prob_t, pow=1, chunk=50, thres=0.03):
+    """Get relation values (top-k and summation)
+
+    Args:
+        feat (torch.Tensor [N,D]): features of the source data
+        feat_t (torch.Tensor [N',D]): features of the target data
+        prob (torch.Tensor [N,C]): probability vectors of the source data
+        prob_t (torch.Tensor [N',C]): probability vectors of the target data
+        pow (int): temperature of the kernel function
+        chunk (int): batch size of kernel calculation
+            (trade-off between memory and speed)
+        thres (float): cut-off value for small relation graph edges.
+            Defaults to 0.03.
+ + Returns: + graph: statistics of relation graph + """ + + n = feat.shape[0] + n_chunk = ceil(n / chunk) + + score = [] + for i in range(n_chunk): + feat_ = feat[i * chunk:(i + 1) * chunk] + prob_ = prob[i * chunk:(i + 1) * chunk] + + rel = kernel(feat_, feat_t, prob_, prob_t) + + mask = (rel.abs() > thres) + rel_mask = mask * rel + edge_sum = (rel_mask.sign() * (rel_mask.abs()**pow)).sum(-1) + + score.append(edge_sum.cpu()) + + score = torch.cat(score, dim=0) + + return score + + +class RelationPostprocessor(BasePostprocessor): + def __init__(self, config): + super(RelationPostprocessor, self).__init__(config) + self.args = self.config.postprocessor.postprocessor_args + self.pow = self.args.pow + self.feature_log = None + self.args_dict = self.config.postprocessor.postprocessor_sweep + self.setup_flag = False + + def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict): + if not self.setup_flag: + feature_log = [] + prob_log = [] + net.eval() + with torch.no_grad(): + for batch in tqdm(id_loader_dict['train'], desc='Setup: ', position=0, leave=True): + data = batch['data'].cuda() + data = data.float() + + logit, feature = net(data, return_feature=True) + prob = torch.softmax(logit, dim=1) + feature_log.append(normalize(feature)) + prob_log.append(prob) + + self.feat_train = torch.cat(feature_log, axis=0) + self.prob_train = torch.cat(prob_log, axis=0) + + self.setup_flag = True + else: + pass + + @torch.no_grad() + def postprocess(self, net: nn.Module, data: Any): + output, feature = net(data, return_feature=True) + feature = normalize(feature) + prob = torch.softmax(output, dim=1) + + score = get_relation(feature, self.feat_train, prob, self.prob_train, pow=self.pow) + + _, pred = torch.max(prob, dim=1) + + return pred, score + + def set_hyperparam(self, hyperparam: list): + self.pow = hyperparam[0] + + def get_hyperparam(self): + return self.pow diff --git a/OpenOOD/openood/postprocessors/residual_postprocessor.py b/OpenOOD/openood/postprocessors/residual_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..67219c253e5b768ff4b7b2c717764ad44136ae67 --- /dev/null +++ b/OpenOOD/openood/postprocessors/residual_postprocessor.py @@ -0,0 +1,65 @@ +from typing import Any + +import numpy as np +import torch +import torch.nn as nn +from numpy.linalg import norm, pinv +from sklearn.covariance import EmpiricalCovariance +from tqdm import tqdm + +from .base_postprocessor import BasePostprocessor + + +class ResidualPostprocessor(BasePostprocessor): + def __init__(self, config): + super().__init__(config) + self.args = self.config.postprocessor.postprocessor_args + self.dim = self.args.dim + + def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict): + net.eval() + + with torch.no_grad(): + self.w, self.b = net.get_fc() + print('Extracting id training feature') + feature_id_train = [] + for batch in tqdm(id_loader_dict['val'], + desc='Eval: ', + position=0, + leave=True): + data = batch['data'].cuda() + data = data.float() + _, feature = net(data, return_feature=True) + feature_id_train.append(feature.cpu().numpy()) + feature_id_train = np.concatenate(feature_id_train, axis=0) + + print('Extracting id testing feature') + feature_id_val = [] + for batch in tqdm(id_loader_dict['test'], + desc='Eval: ', + position=0, + leave=True): + data = batch['data'].cuda() + data = data.float() + _, feature = net(data, return_feature=True) + feature_id_val.append(feature.cpu().numpy()) + feature_id_val = np.concatenate(feature_id_val, axis=0) + + self.u = 
-np.matmul(pinv(self.w), self.b) + ec = EmpiricalCovariance(assume_centered=True) + ec.fit(feature_id_train - self.u) + eig_vals, eigen_vectors = np.linalg.eig(ec.covariance_) + self.NS = np.ascontiguousarray( + (eigen_vectors.T[np.argsort(eig_vals * -1)[self.dim:]]).T) + + self.score_id = -norm(np.matmul(feature_id_val - self.u, self.NS), + axis=-1) + + @torch.no_grad() + def postprocess(self, net: nn.Module, data: Any): + _, feature_ood = net(data, return_feature=True) + logit_ood = feature_ood.cpu() @ self.w.T + self.b + _, pred = torch.max(logit_ood, dim=1) + score_ood = -norm(np.matmul(feature_ood.cpu() - self.u, self.NS), + axis=-1) + return pred, torch.from_numpy(score_ood) diff --git a/OpenOOD/openood/postprocessors/rmds_postprocessor.py b/OpenOOD/openood/postprocessors/rmds_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..1e86597de0dd9d4f3fe8c0d2569aed2587b92528 --- /dev/null +++ b/OpenOOD/openood/postprocessors/rmds_postprocessor.py @@ -0,0 +1,92 @@ +from copy import deepcopy +from typing import Any + +import numpy as np +import torch +import torch.nn as nn +import sklearn.covariance +from tqdm import tqdm + +from .base_postprocessor import BasePostprocessor +from .info import num_classes_dict + + +class RMDSPostprocessor(BasePostprocessor): + def __init__(self, config): + self.config = config + self.num_classes = num_classes_dict[self.config.dataset.name] + self.setup_flag = False + + def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict): + if not self.setup_flag: + # estimate mean and variance from training set + print('\n Estimating mean and variance from training set...') + all_feats = [] + all_labels = [] + all_preds = [] + with torch.no_grad(): + for batch in tqdm(id_loader_dict['train'], + desc='Setup: ', + position=0, + leave=True): + data, labels = batch['data'].cuda(), batch['label'] + logits, features = net(data, return_feature=True) + all_feats.append(features.cpu()) + all_labels.append(deepcopy(labels)) + all_preds.append(logits.argmax(1).cpu()) + + all_feats = torch.cat(all_feats) + all_labels = torch.cat(all_labels) + all_preds = torch.cat(all_preds) + # sanity check on train acc + train_acc = all_preds.eq(all_labels).float().mean() + print(f' Train acc: {train_acc:.2%}') + + # compute class-conditional statistics + self.class_mean = [] + centered_data = [] + for c in range(self.num_classes): + class_samples = all_feats[all_labels.eq(c)].data + self.class_mean.append(class_samples.mean(0)) + centered_data.append(class_samples - + self.class_mean[c].view(1, -1)) + + self.class_mean = torch.stack( + self.class_mean) # shape [#classes, feature dim] + + group_lasso = sklearn.covariance.EmpiricalCovariance( + assume_centered=False) + group_lasso.fit( + torch.cat(centered_data).cpu().numpy().astype(np.float32)) + # inverse of covariance + self.precision = torch.from_numpy(group_lasso.precision_).float() + + self.whole_mean = all_feats.mean(0) + centered_data = all_feats - self.whole_mean.view(1, -1) + group_lasso = sklearn.covariance.EmpiricalCovariance( + assume_centered=False) + group_lasso.fit(centered_data.cpu().numpy().astype(np.float32)) + self.whole_precision = torch.from_numpy( + group_lasso.precision_).float() + self.setup_flag = True + else: + pass + + @torch.no_grad() + def postprocess(self, net: nn.Module, data: Any): + logits, features = net(data, return_feature=True) + pred = logits.argmax(1) + + tensor1 = features.cpu() - self.whole_mean.view(1, -1) + background_scores = -torch.matmul( + torch.matmul(tensor1, 
self.whole_precision), tensor1.t()).diag() + + class_scores = torch.zeros((logits.shape[0], self.num_classes)) + for c in range(self.num_classes): + tensor = features.cpu() - self.class_mean[c].view(1, -1) + class_scores[:, c] = -torch.matmul( + torch.matmul(tensor, self.precision), tensor.t()).diag() + class_scores[:, c] = class_scores[:, c] - background_scores + + conf = torch.max(class_scores, dim=1)[0] + return pred, conf diff --git a/OpenOOD/openood/postprocessors/rotpred_postprocessor.py b/OpenOOD/openood/postprocessors/rotpred_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..e029770c3bebf16f704c6faac3c5125e1b5cc69d --- /dev/null +++ b/OpenOOD/openood/postprocessors/rotpred_postprocessor.py @@ -0,0 +1,58 @@ +from typing import Any + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .base_postprocessor import BasePostprocessor + + +def kl_div(d1, d2): + """Compute KL-Divergence between d1 and d2.""" + dirty_logs = d1 * torch.log2(d1 / d2) + return torch.sum(torch.where(d1 != 0, dirty_logs, torch.zeros_like(d1)), + axis=1) + + +class RotPredPostprocessor(BasePostprocessor): + def __init__(self, config): + super(RotPredPostprocessor, self).__init__(config) + self.config = config + + @torch.no_grad() + def postprocess(self, net: nn.Module, data: Any): + batch_size = len(data) + + x_90 = torch.rot90(data, 1, [2, 3]) + x_180 = torch.rot90(data, 2, [2, 3]) + x_270 = torch.rot90(data, 3, [2, 3]) + + x_rot = torch.cat([data, x_90, x_180, x_270]) + y_rot = torch.cat([ + torch.zeros(batch_size), + torch.ones(batch_size), + 2 * torch.ones(batch_size), + 3 * torch.ones(batch_size), + ]).long().cuda() + + logits, logits_rot = net(x_rot, return_rot_logits=True) + logits = logits[:batch_size] + preds = logits.argmax(1) + + # https://github.com/hendrycks/ss-ood/blob/8051356592a152614ab7251fd15084dd86eb9104/multiclass_ood/test_auxiliary_ood.py#L177-L208 + num_classes = logits.shape[1] + uniform_dist = torch.ones_like(logits) / num_classes + cls_loss = kl_div(uniform_dist, F.softmax(logits, dim=1)) + + rot_one_hot = torch.zeros_like(logits_rot).scatter_( + 1, + y_rot.unsqueeze(1).cuda(), 1) + rot_loss = kl_div(rot_one_hot, F.softmax(logits_rot, dim=1)) + rot_0_loss, rot_90_loss, rot_180_loss, rot_270_loss = torch.chunk( + rot_loss, 4, dim=0) + total_rot_loss = (rot_0_loss + rot_90_loss + rot_180_loss + + rot_270_loss) / 4.0 + + # here ID samples will yield larger scores + scores = cls_loss - total_rot_loss + return preds, scores diff --git a/OpenOOD/openood/postprocessors/rts_postprocessor.py b/OpenOOD/openood/postprocessors/rts_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..84b3c810e404e870df873f959ceed86429486089 --- /dev/null +++ b/OpenOOD/openood/postprocessors/rts_postprocessor.py @@ -0,0 +1,27 @@ +from typing import Any + +import torch +import torch.nn as nn + +from .base_postprocessor import BasePostprocessor + + +class RTSPostprocessor(BasePostprocessor): + def __init__(self, config): + super(RTSPostprocessor, self).__init__(config) + self.args = self.config.postprocessor.postprocessor_args + self.ood_score = self.args.ood_score + + def postprocess(self, net: nn.Module, data: Any): + output, variance = net(data, return_var=True) + if self.ood_score == 'var': + _, pred = torch.max(torch.softmax(output, dim=1), dim=1) + conf = torch.mean(variance, dim=1) + elif self.ood_score == 'msp': + score = torch.softmax(output, dim=1) + conf, pred = torch.max(score, dim=1) + else: + print('Invalid ood 
score type, using var instead') + _, pred = torch.max(torch.softmax(output, dim=1), dim=1) + conf = torch.mean(variance, dim=1) + return pred, conf diff --git a/OpenOOD/openood/postprocessors/scale_postprocessor.py b/OpenOOD/openood/postprocessors/scale_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..c9be5944108b217b0922ce440be6046ea4adebb7 --- /dev/null +++ b/OpenOOD/openood/postprocessors/scale_postprocessor.py @@ -0,0 +1,29 @@ +from typing import Any + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .base_postprocessor import BasePostprocessor + + +class ScalePostprocessor(BasePostprocessor): + def __init__(self, config): + super(ScalePostprocessor, self).__init__(config) + self.args = self.config.postprocessor.postprocessor_args + self.percentile = self.args.percentile + self.args_dict = self.config.postprocessor.postprocessor_sweep + + @torch.no_grad() + def postprocess(self, net: nn.Module, data: Any): + output = net.forward_threshold(data, self.percentile) + _, pred = torch.max(output, dim=1) + energyconf = torch.logsumexp(output.data.cpu(), dim=1) + return pred, energyconf + + def set_hyperparam(self, hyperparam: list): + self.percentile = hyperparam[0] + + def get_hyperparam(self): + return self.percentile diff --git a/OpenOOD/openood/postprocessors/she_postprocessor.py b/OpenOOD/openood/postprocessors/she_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..72d2450b851fa963b9a8e995ac8011d3b92d3aae --- /dev/null +++ b/OpenOOD/openood/postprocessors/she_postprocessor.py @@ -0,0 +1,72 @@ +from typing import Any + +from copy import deepcopy +import torch +import torch.nn as nn +from tqdm import tqdm + +from .base_postprocessor import BasePostprocessor +from .info import num_classes_dict + + +def distance(penultimate, target, metric='inner_product'): + if metric == 'inner_product': + return torch.sum(torch.mul(penultimate, target), dim=1) + elif metric == 'euclidean': + return -torch.sqrt(torch.sum((penultimate - target)**2, dim=1)) + elif metric == 'cosine': + return torch.cosine_similarity(penultimate, target, dim=1) + else: + raise ValueError('Unknown metric: {}'.format(metric)) + + +class SHEPostprocessor(BasePostprocessor): + def __init__(self, config): + super(SHEPostprocessor, self).__init__(config) + self.args = self.config.postprocessor.postprocessor_args + self.num_classes = num_classes_dict[self.config.dataset.name] + self.activation_log = None + self.setup_flag = False + + def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict): + if not self.setup_flag: + net.eval() + + all_activation_log = [] + all_labels = [] + all_preds = [] + with torch.no_grad(): + for batch in tqdm(id_loader_dict['train'], + desc='Eval: ', + position=0, + leave=True): + data = batch['data'].cuda() + labels = batch['label'] + all_labels.append(deepcopy(labels)) + + logits, features = net(data, return_feature=True) + all_activation_log.append(features.cpu()) + all_preds.append(logits.argmax(1).cpu()) + + all_preds = torch.cat(all_preds) + all_labels = torch.cat(all_labels) + all_activation_log = torch.cat(all_activation_log) + + self.activation_log = [] + for i in range(self.num_classes): + mask = torch.logical_and(all_labels == i, all_preds == i) + class_correct_activations = all_activation_log[mask] + self.activation_log.append( + class_correct_activations.mean(0, keepdim=True)) + + self.activation_log = torch.cat(self.activation_log).cuda() + self.setup_flag = True + 
else:
+            pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        output, feature = net(data, return_feature=True)
+        pred = output.argmax(1)
+        conf = distance(feature, self.activation_log[pred], self.args.metric)
+        return pred, conf
diff --git a/OpenOOD/openood/postprocessors/ssd_postprocessor.py b/OpenOOD/openood/postprocessors/ssd_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffbef3b51423a8c19c6edd3579cbaee5269a41c0
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/ssd_postprocessor.py
@@ -0,0 +1,24 @@
+import torch
+import torch.nn as nn
+from .base_postprocessor import BasePostprocessor
+from .mds_ensemble_postprocessor import get_MDS_stat
+
+
+class SSDPostprocessor(BasePostprocessor):
+    def __init__(self, config):
+        self.config = config
+        self.postprocessor_args = config.postprocessor.postprocessor_args
+
+        self.feature_type_list = self.postprocessor_args.feature_type_list
+        self.reduce_dim_list = self.postprocessor_args.reduce_dim_list
+
+        # self.num_classes = self.config.dataset.num_classes
+        # SSD fits a single class-agnostic Gaussian over all ID features
+        self.num_classes = 1
+        self.num_layer = len(self.feature_type_list)
+
+        self.feature_mean, self.feature_prec = None, None
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        self.feature_mean, self.feature_prec, self.transform_matrix = \
+            get_MDS_stat(net, id_loader_dict['train'], self.num_classes,
+                         self.feature_type_list, self.reduce_dim_list)
diff --git a/OpenOOD/openood/postprocessors/temp_scaling_postprocessor.py b/OpenOOD/openood/postprocessors/temp_scaling_postprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..2acf0877843a0ca438588a173482f264b0d6c7f3
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/temp_scaling_postprocessor.py
@@ -0,0 +1,78 @@
+from typing import Any
+
+import torch
+from torch import nn, optim
+from tqdm import tqdm
+
+from .base_postprocessor import BasePostprocessor
+
+
+class TemperatureScalingPostprocessor(BasePostprocessor):
+    """A decorator that wraps a model with temperature scaling, internalizing
+    the 'temperature' parameter as part of the net."""
+    def __init__(self, config):
+        super(TemperatureScalingPostprocessor, self).__init__(config)
+        self.config = config
+        self.temperature = nn.Parameter(torch.ones(1, device='cuda') *
+                                        1.5)  # initial temperature T = 1.5
+        self.setup_flag = False
+
+    def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict):
+        if not self.setup_flag:
+            # make sure that a validation set exists
+            assert 'val' in id_loader_dict.keys(
+            ), 'No validation dataset found!'
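+            # Temperature scaling (Guo et al., 2017): the network stays
+            # frozen; a single scalar T is fitted on held-out validation
+            # data by minimizing the NLL of logits / T. T > 1 softens the
+            # softmax distribution, T < 1 sharpens it.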
+
+            val_dl = id_loader_dict['val']
+            nll_criterion = nn.CrossEntropyLoss().cuda()
+
+            logits_list = []  # collect logits for the whole val set so a single NLL can be optimized
+            labels_list = []
+            with torch.no_grad(
+            ):  # no gradients needed while collecting logits
+                for batch in tqdm(val_dl):
+                    data = batch['data'].cuda()
+                    labels = batch['label']
+                    logits = net(data)
+                    logits_list.append(logits)
+                    labels_list.append(labels)
+                # convert a list of per-batch tensors into one tensor
+                logits = torch.cat(logits_list).cuda()
+                labels = torch.cat(labels_list).cuda()
+                # calculate NLL before temperature scaling
+                before_temperature_nll = nll_criterion(logits, labels)
+
+            print('Before temperature - NLL: %.3f' % (before_temperature_nll))
+
+            optimizer = optim.LBFGS([self.temperature], lr=0.01, max_iter=50)
+
+            # LBFGS closure: only the temperature parameter is optimized;
+            # all network parameters stay fixed
+            def closure():
+                optimizer.zero_grad()
+                loss = nll_criterion(self._temperature_scale(logits), labels)
+                loss.backward()
+                return loss
+
+            optimizer.step(closure)
+
+            # report the learned temperature and
+            # the NLL after temperature scaling
+            after_temperature_nll = nll_criterion(
+                self._temperature_scale(logits), labels).item()
+            print('Optimal temperature: %.3f' % self.temperature.item())
+            print('After temperature - NLL: %.3f' % (after_temperature_nll))
+            self.setup_flag = True
+        else:
+            pass
+
+    def _temperature_scale(self, logits):
+        return logits / self.temperature
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        logits = net(data)
+        logits_ts = self._temperature_scale(logits)
+        score = torch.softmax(logits_ts, dim=1)
+        conf, pred = torch.max(score, dim=1)
+        return pred, conf
diff --git a/OpenOOD/openood/postprocessors/utils.py b/OpenOOD/openood/postprocessors/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e5022cab6aab5ecdbf5d3fe5705cd863600722d
--- /dev/null
+++ b/OpenOOD/openood/postprocessors/utils.py
@@ -0,0 +1,89 @@
+from openood.utils import Config
+
+from .ash_postprocessor import ASHPostprocessor
+from .base_postprocessor import BasePostprocessor
+from .cider_postprocessor import CIDERPostprocessor
+from .conf_branch_postprocessor import ConfBranchPostprocessor
+from .cutpaste_postprocessor import CutPastePostprocessor
+from .dice_postprocessor import DICEPostprocessor
+from .draem_postprocessor import DRAEMPostprocessor
+from .dropout_postprocessor import DropoutPostProcessor
+from .dsvdd_postprocessor import DSVDDPostprocessor
+from .ebo_postprocessor import EBOPostprocessor
+from .ensemble_postprocessor import EnsemblePostprocessor
+from .gmm_postprocessor import GMMPostprocessor
+from .godin_postprocessor import GodinPostprocessor
+from .gradnorm_postprocessor import GradNormPostprocessor
+from .gram_postprocessor import GRAMPostprocessor
+from .kl_matching_postprocessor import KLMatchingPostprocessor
+from .knn_postprocessor import KNNPostprocessor
+from .maxlogit_postprocessor import MaxLogitPostprocessor
+from .mcd_postprocessor import MCDPostprocessor
+from .mds_postprocessor import MDSPostprocessor
+from .mds_ensemble_postprocessor import MDSEnsemblePostprocessor
+from .mos_postprocessor import MOSPostprocessor
+from .npos_postprocessor import NPOSPostprocessor
+from .odin_postprocessor import ODINPostprocessor
+from .opengan_postprocessor import OpenGanPostprocessor
+from .openmax_postprocessor import OpenMax
+from .patchcore_postprocessor import PatchcorePostprocessor
+from .rd4ad_postprocessor import
Rd4adPostprocessor +from .react_postprocessor import ReactPostprocessor +from .rmds_postprocessor import RMDSPostprocessor +from .residual_postprocessor import ResidualPostprocessor +from .rotpred_postprocessor import RotPredPostprocessor +from .rankfeat_postprocessor import RankFeatPostprocessor +from .ssd_postprocessor import SSDPostprocessor +from .she_postprocessor import SHEPostprocessor +from .temp_scaling_postprocessor import TemperatureScalingPostprocessor +from .vim_postprocessor import VIMPostprocessor +from .rts_postprocessor import RTSPostprocessor +from .gen_postprocessor import GENPostprocessor +from .relation_postprocessor import RelationPostprocessor + + +def get_postprocessor(config: Config): + postprocessors = { + 'ash': ASHPostprocessor, + 'cider': CIDERPostprocessor, + 'conf_branch': ConfBranchPostprocessor, + 'msp': BasePostprocessor, + 'ebo': EBOPostprocessor, + 'odin': ODINPostprocessor, + 'mds': MDSPostprocessor, + 'mds_ensemble': MDSEnsemblePostprocessor, + 'rmds': RMDSPostprocessor, + 'gmm': GMMPostprocessor, + 'patchcore': PatchcorePostprocessor, + 'openmax': OpenMax, + 'react': ReactPostprocessor, + 'vim': VIMPostprocessor, + 'gradnorm': GradNormPostprocessor, + 'godin': GodinPostprocessor, + 'gram': GRAMPostprocessor, + 'cutpaste': CutPastePostprocessor, + 'mls': MaxLogitPostprocessor, + 'npos': NPOSPostprocessor, + 'residual': ResidualPostprocessor, + 'klm': KLMatchingPostprocessor, + 'temperature_scaling': TemperatureScalingPostprocessor, + 'ensemble': EnsemblePostprocessor, + 'dropout': DropoutPostProcessor, + 'draem': DRAEMPostprocessor, + 'dsvdd': DSVDDPostprocessor, + 'mos': MOSPostprocessor, + 'mcd': MCDPostprocessor, + 'opengan': OpenGanPostprocessor, + 'knn': KNNPostprocessor, + 'dice': DICEPostprocessor, + 'ssd': SSDPostprocessor, + 'she': SHEPostprocessor, + 'rd4ad': Rd4adPostprocessor, + 'rts': RTSPostprocessor, + 'rotpred': RotPredPostprocessor, + 'rankfeat': RankFeatPostprocessor, + 'gen': GENPostprocessor, + 'relation': RelationPostprocessor + } + + return postprocessors[config.postprocessor.name](config) diff --git a/OpenOOD/openood/postprocessors/vim_postprocessor.py b/OpenOOD/openood/postprocessors/vim_postprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..f8bf81c60878ad9a6fa3fc73fb553a74ea1b33ad --- /dev/null +++ b/OpenOOD/openood/postprocessors/vim_postprocessor.py @@ -0,0 +1,75 @@ +from typing import Any + +import numpy as np +import torch +import torch.nn as nn +from numpy.linalg import norm, pinv +from scipy.special import logsumexp +from sklearn.covariance import EmpiricalCovariance +from tqdm import tqdm + +from .base_postprocessor import BasePostprocessor + + +class VIMPostprocessor(BasePostprocessor): + def __init__(self, config): + super().__init__(config) + self.args = self.config.postprocessor.postprocessor_args + self.args_dict = self.config.postprocessor.postprocessor_sweep + self.dim = self.args.dim + self.setup_flag = False + + def setup(self, net: nn.Module, id_loader_dict, ood_loader_dict): + if not self.setup_flag: + net.eval() + + with torch.no_grad(): + self.w, self.b = net.get_fc() + print('Extracting id training feature') + feature_id_train = [] + for batch in tqdm(id_loader_dict['train'], + desc='Setup: ', + position=0, + leave=True): + data = batch['data'].cuda() + data = data.float() + _, feature = net(data, return_feature=True) + feature_id_train.append(feature.cpu().numpy()) + feature_id_train = np.concatenate(feature_id_train, axis=0) + logit_id_train = feature_id_train @ self.w.T + 
self.b
+
+                self.u = -np.matmul(pinv(self.w), self.b)
+                ec = EmpiricalCovariance(assume_centered=True)
+                ec.fit(feature_id_train - self.u)
+                eig_vals, eigen_vectors = np.linalg.eig(ec.covariance_)
+                # NS spans the residual (null) space: the directions left
+                # after the top-`dim` principal components
+                self.NS = np.ascontiguousarray(
+                    (eigen_vectors.T[np.argsort(eig_vals * -1)[self.dim:]]).T)
+
+                vlogit_id_train = norm(np.matmul(feature_id_train - self.u,
+                                                 self.NS),
+                                       axis=-1)
+                self.alpha = logit_id_train.max(
+                    axis=-1).mean() / vlogit_id_train.mean()
+                print(f'self.alpha = {self.alpha:.4f}')
+
+            self.setup_flag = True
+        else:
+            pass
+
+    @torch.no_grad()
+    def postprocess(self, net: nn.Module, data: Any):
+        _, feature_ood = net.forward(data, return_feature=True)
+        feature_ood = feature_ood.cpu()
+        logit_ood = feature_ood @ self.w.T + self.b
+        _, pred = torch.max(logit_ood, dim=1)
+        energy_ood = logsumexp(logit_ood.numpy(), axis=-1)
+        vlogit_ood = norm(np.matmul(feature_ood.numpy() - self.u, self.NS),
+                          axis=-1) * self.alpha
+        score_ood = -vlogit_ood + energy_ood
+        return pred, torch.from_numpy(score_ood)
+
+    def set_hyperparam(self, hyperparam: list):
+        self.dim = hyperparam[0]
+
+    def get_hyperparam(self):
+        return self.dim
diff --git a/OpenOOD/openood/preprocessors/__init__.py b/OpenOOD/openood/preprocessors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2ca8e67baf114ce30438b3903f25e880424fea7
--- /dev/null
+++ b/OpenOOD/openood/preprocessors/__init__.py
@@ -0,0 +1,6 @@
+from .base_preprocessor import BasePreprocessor
+from .cutpaste_preprocessor import CutPastePreprocessor
+from .draem_preprocessor import DRAEMPreprocessor
+from .pixmix_preprocessor import PixMixPreprocessor
+from .test_preprocessor import TestStandardPreProcessor
+from .utils import get_preprocessor
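For reference, the ViM score above has two parts: an energy term from the logits and a "virtual logit" measuring how far a feature falls outside the top-`dim` principal subspace of the ID training features. A minimal standalone sketch of the same computation (NumPy only; `w`, `b`, `feat_train`, `feat`, and `dim` are placeholder inputs, not part of the library API):

import numpy as np
from numpy.linalg import norm, pinv
from scipy.special import logsumexp
from sklearn.covariance import EmpiricalCovariance


def vim_scores(w, b, feat_train, feat, dim):
    # shift features so that logits = (f - u) @ w.T
    u = -np.matmul(pinv(w), b)
    ec = EmpiricalCovariance(assume_centered=True)
    ec.fit(feat_train - u)
    eig_vals, eig_vecs = np.linalg.eig(ec.covariance_)
    # residual subspace: everything beyond the top-`dim` principal directions
    NS = np.ascontiguousarray(eig_vecs.T[np.argsort(-eig_vals)[dim:]].T)
    # alpha rescales the residual norm to the magnitude of the max logit
    logit_train = feat_train @ w.T + b
    alpha = logit_train.max(-1).mean() / norm(
        (feat_train - u) @ NS, axis=-1).mean()
    logits = feat @ w.T + b
    vlogit = alpha * norm((feat - u) @ NS, axis=-1)  # virtual OOD logit
    return logsumexp(logits, axis=-1) - vlogit  # higher means more ID-like

This mirrors the setup/postprocess split above, where `setup` runs once over the ID training loader and `postprocess` scores each test batch.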
diff --git a/OpenOOD/openood/preprocessors/augmix_preprocessor.py b/OpenOOD/openood/preprocessors/augmix_preprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..25950d0c3736cb2ad2bcefa54584fbddc41d0231
--- /dev/null
+++ b/OpenOOD/openood/preprocessors/augmix_preprocessor.py
@@ -0,0 +1,72 @@
+import torchvision.transforms as tvs_trans
+
+from openood.utils.config import Config
+
+from .transform import Convert, interpolation_modes, normalization_dict
+
+
+class AugMixPreprocessor():
+    def __init__(self, config: Config):
+        self.pre_size = config.dataset.pre_size
+        self.image_size = config.dataset.image_size
+        self.interpolation = interpolation_modes[config.dataset.interpolation]
+        normalization_type = config.dataset.normalization_type
+        if normalization_type in normalization_dict.keys():
+            self.mean = normalization_dict[normalization_type][0]
+            self.std = normalization_dict[normalization_type][1]
+        else:
+            self.mean = [0.5, 0.5, 0.5]
+            self.std = [0.5, 0.5, 0.5]
+
+        self.severity = config.preprocessor.severity
+        self.mixture_width = config.preprocessor.mixture_width
+        self.alpha = config.preprocessor.alpha
+        self.chain_depth = config.preprocessor.chain_depth
+        self.all_ops = config.preprocessor.all_ops
+        self.jsd = config.trainer.trainer_args.jsd
+
+        self.augmix = tvs_trans.AugMix(severity=self.severity,
+                                       mixture_width=self.mixture_width,
+                                       chain_depth=self.chain_depth,
+                                       alpha=self.alpha,
+                                       all_ops=self.all_ops,
+                                       interpolation=self.interpolation)
+        self.normalize = tvs_trans.Compose([
+            tvs_trans.ToTensor(),
+            tvs_trans.Normalize(mean=self.mean, std=self.std),
+        ])
+
+        if 'imagenet' in config.dataset.name:
+            self.transform = tvs_trans.Compose([
+                tvs_trans.RandomResizedCrop(self.image_size,
+                                            interpolation=self.interpolation),
+                tvs_trans.RandomHorizontalFlip(0.5),
+            ])
+        elif 'aircraft' in config.dataset.name or 'cub' in config.dataset.name:
+            self.transform = tvs_trans.Compose([
+                tvs_trans.Resize(self.pre_size,
+                                 interpolation=self.interpolation),
+                tvs_trans.RandomCrop(self.image_size),
+                tvs_trans.RandomHorizontalFlip(),
+            ])
+        else:
+            self.transform = tvs_trans.Compose([
+                Convert('RGB'),
+                tvs_trans.Resize(self.pre_size,
+                                 interpolation=self.interpolation),
+                tvs_trans.CenterCrop(self.image_size),
+                tvs_trans.RandomHorizontalFlip(),
+                tvs_trans.RandomCrop(self.image_size, padding=4),
+            ])
+
+    def setup(self, **kwargs):
+        pass
+
+    def
__call__(self, image): + if self.jsd: + orig = self.transform(image) + aug1 = self.normalize(self.augmix(orig)) + aug2 = self.normalize(self.augmix(orig)) + return self.normalize(orig), aug1, aug2 + else: + return self.normalize(self.augmix(self.transform(image))) diff --git a/OpenOOD/openood/preprocessors/base_preprocessor.py b/OpenOOD/openood/preprocessors/base_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..c8105cc9ea9bea67ba02d8996c101f04b6d763bf --- /dev/null +++ b/OpenOOD/openood/preprocessors/base_preprocessor.py @@ -0,0 +1,58 @@ +import torchvision.transforms as tvs_trans + +from openood.utils.config import Config + +from .transform import Convert, interpolation_modes, normalization_dict + + +class BasePreprocessor(): + """For train dataset standard transformation.""" + def __init__(self, config: Config): + self.pre_size = config.dataset.pre_size + self.image_size = config.dataset.image_size + self.interpolation = interpolation_modes[config.dataset.interpolation] + normalization_type = config.dataset.normalization_type + if normalization_type in normalization_dict.keys(): + self.mean = normalization_dict[normalization_type][0] + self.std = normalization_dict[normalization_type][1] + else: + self.mean = [0.5, 0.5, 0.5] + self.std = [0.5, 0.5, 0.5] + + if 'imagenet' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.RandomResizedCrop(self.image_size, + interpolation=self.interpolation), + tvs_trans.RandomHorizontalFlip(0.5), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + elif 'aircraft' in config.dataset.name or 'cub' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + tvs_trans.RandomCrop(self.image_size), + tvs_trans.RandomHorizontalFlip(), + tvs_trans.ColorJitter(brightness=32. 
/ 255., saturation=0.5), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + + + else: + self.transform = tvs_trans.Compose([ + Convert('RGB'), + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + tvs_trans.CenterCrop(self.image_size), + tvs_trans.RandomHorizontalFlip(), + tvs_trans.RandomCrop(self.image_size, padding=4), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + + def setup(self, **kwargs): + pass + + def __call__(self, image): + return self.transform(image) diff --git a/OpenOOD/openood/preprocessors/cider_preprocessor.py b/OpenOOD/openood/preprocessors/cider_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..10c6c684f97799d19a185accd58ee60f1c20f5b5 --- /dev/null +++ b/OpenOOD/openood/preprocessors/cider_preprocessor.py @@ -0,0 +1,62 @@ +import torchvision.transforms as tvs_trans + +from openood.utils.config import Config + +from .transform import Convert, interpolation_modes, normalization_dict + + +class CiderPreprocessor(): + def __init__(self, config: Config): + self.pre_size = config.dataset.pre_size + self.image_size = config.dataset.image_size + self.interpolation = interpolation_modes[config.dataset.interpolation] + normalization_type = config.dataset.normalization_type + if normalization_type in normalization_dict.keys(): + self.mean = normalization_dict[normalization_type][0] + self.std = normalization_dict[normalization_type][1] + else: + self.mean = [0.5, 0.5, 0.5] + self.std = [0.5, 0.5, 0.5] + + if 'imagenet' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.RandomResizedCrop(size=self.image_size, + scale=(0.4, 1.), + interpolation=self.interpolation), + tvs_trans.RandomHorizontalFlip(), + tvs_trans.RandomApply( + [tvs_trans.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), + tvs_trans.RandomGrayscale(p=0.2), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + else: + self.transform = tvs_trans.Compose([ + Convert('RGB'), + tvs_trans.RandomResizedCrop(size=self.image_size, + scale=(0.2, 1.), + interpolation=self.interpolation), + tvs_trans.RandomHorizontalFlip(), + tvs_trans.RandomApply( + [tvs_trans.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), + tvs_trans.RandomGrayscale(p=0.2), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + + self.transform = TwoCropTransform(self.transform) + + def setup(self, **kwargs): + pass + + def __call__(self, image): + return self.transform(image) + + +class TwoCropTransform: + """Create two crops of the same image.""" + def __init__(self, transform): + self.transform = transform + + def __call__(self, x): + return [self.transform(x), self.transform(x)] diff --git a/OpenOOD/openood/preprocessors/csi_preprocessor.py b/OpenOOD/openood/preprocessors/csi_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..1a7dd9b8e01816aec2d7aa67cd5711693581e073 --- /dev/null +++ b/OpenOOD/openood/preprocessors/csi_preprocessor.py @@ -0,0 +1,55 @@ +import torchvision.transforms as tvs_trans + +from openood.utils.config import Config + +from .transform import Convert, interpolation_modes, normalization_dict + + +class CSIPreprocessor(): + def __init__(self, config: Config): + self.pre_size = config.dataset.pre_size + self.image_size = config.dataset.image_size + self.interpolation = interpolation_modes[config.dataset.interpolation] + normalization_type = config.dataset.normalization_type + if normalization_type in 
normalization_dict.keys(): + self.mean = normalization_dict[normalization_type][0] + self.std = normalization_dict[normalization_type][1] + else: + self.mean = [0.5, 0.5, 0.5] + self.std = [0.5, 0.5, 0.5] + + if 'imagenet' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.RandomResizedCrop(self.image_size, + interpolation=self.interpolation), + # tvs_trans.RandomHorizontalFlip(0.5), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + elif 'aircraft' in config.dataset.name or 'cub' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + tvs_trans.RandomCrop(self.image_size), + # tvs_trans.RandomHorizontalFlip(), + # tvs_trans.ColorJitter(brightness=32./255., saturation=0.5), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + else: + self.transform = tvs_trans.Compose([ + Convert('RGB'), + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + # tvs_trans.RandomHorizontalFlip(), + # tvs_trans.RandomCrop(self.image_size, padding=4), + tvs_trans.CenterCrop(self.image_size), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + + def setup(self, **kwargs): + pass + + def __call__(self, image): + return self.transform(image) diff --git a/OpenOOD/openood/preprocessors/cutout_preprocessor.py b/OpenOOD/openood/preprocessors/cutout_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..c811641d0c9669ac33a38c15b16b5a37c2111a32 --- /dev/null +++ b/OpenOOD/openood/preprocessors/cutout_preprocessor.py @@ -0,0 +1,104 @@ +import numpy as np +import torch +import torchvision.transforms as tvs_trans + +from openood.utils.config import Config +from .transform import Convert, interpolation_modes, normalization_dict + + +class CutoutPreprocessor(): + def __init__(self, config: Config): + self.pre_size = config.dataset.pre_size + self.image_size = config.dataset.image_size + self.interpolation = interpolation_modes[config.dataset.interpolation] + normalization_type = config.dataset.normalization_type + if normalization_type in normalization_dict.keys(): + self.mean = normalization_dict[normalization_type][0] + self.std = normalization_dict[normalization_type][1] + else: + self.mean = [0.5, 0.5, 0.5] + self.std = [0.5, 0.5, 0.5] + + self.n_holes = config.preprocessor.n_holes + self.length = config.preprocessor.length + + if 'imagenet' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.RandomResizedCrop(self.image_size, + interpolation=self.interpolation), + tvs_trans.RandomHorizontalFlip(0.5), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + Cutout(n_holes=self.n_holes, length=self.length) + ]) + elif 'aircraft' in config.dataset.name or 'cub' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + tvs_trans.RandomCrop(self.image_size), + tvs_trans.RandomHorizontalFlip(), + tvs_trans.ColorJitter(brightness=32. 
/ 255., saturation=0.5),
+                tvs_trans.ToTensor(),
+                tvs_trans.Normalize(mean=self.mean, std=self.std),
+                Cutout(n_holes=self.n_holes, length=self.length)
+            ])
+        else:
+            self.transform = tvs_trans.Compose([
+                Convert('RGB'),
+                tvs_trans.Resize(self.pre_size,
+                                 interpolation=self.interpolation),
+                tvs_trans.CenterCrop(self.image_size),
+                tvs_trans.RandomHorizontalFlip(),
+                tvs_trans.RandomCrop(self.image_size, padding=4),
+                tvs_trans.ToTensor(),
+                tvs_trans.Normalize(mean=self.mean, std=self.std),
+                Cutout(n_holes=self.n_holes, length=self.length)
+            ])
+
+    def setup(self, **kwargs):
+        pass
+
+    def __call__(self, image):
+        return self.transform(image)
+
+
+class Cutout(object):
+    """Randomly mask out one or more patches from an image.
+
+    Args:
+        n_holes (int): Number of patches to cut out of each image.
+        length (int): The length (in pixels) of each square patch.
+    """
+    def __init__(self, n_holes, length):
+        self.n_holes = n_holes
+        self.length = length
+
+    def __call__(self, img):
+        """
+        Args:
+            img (Tensor): Tensor image of size (C, H, W).
+        Returns:
+            Tensor: Image with n_holes of dimension length x length
+            cut out of it.
+        """
+        h = img.size(1)
+        w = img.size(2)
+
+        mask = np.ones((h, w), np.float32)
+
+        for n in range(self.n_holes):
+            y = np.random.randint(h)
+            x = np.random.randint(w)
+
+            y1 = np.clip(y - self.length // 2, 0, h)
+            y2 = np.clip(y + self.length // 2, 0, h)
+            x1 = np.clip(x - self.length // 2, 0, w)
+            x2 = np.clip(x + self.length // 2, 0, w)
+
+            mask[y1:y2, x1:x2] = 0.
+
+        mask = torch.from_numpy(mask)
+        mask = mask.expand_as(img)
+        img = img * mask
+
+        return img
diff --git a/OpenOOD/openood/preprocessors/cutpaste_preprocessor.py b/OpenOOD/openood/preprocessors/cutpaste_preprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ced79a8c0680ed7e8704e0919c965173e8dee980
--- /dev/null
+++ b/OpenOOD/openood/preprocessors/cutpaste_preprocessor.py
@@ -0,0 +1,85 @@
+import math
+import random
+
+import torch
+import torchvision.transforms as tvs_trans
+
+from .base_preprocessor import BasePreprocessor
+from .transform import Convert, normalization_dict
+
+
+class CutPastePreprocessor(BasePreprocessor):
+    def __init__(
+            self, config,
+            split):  # TODO: unify preprocessors so that only `config` is passed in
+        self.args = config.preprocessor.preprocessor_args
+        self.area_ratio = self.args.area_ratio
+        self.aspect_ratio = self.args.aspect_ratio
+
+        dataset_name = config.dataset.name.split('_')[0]
+        image_size = config.dataset.image_size
+        pre_size = config.dataset.pre_size
+        if dataset_name in normalization_dict.keys():
+            mean = normalization_dict[dataset_name][0]
+            std = normalization_dict[dataset_name][1]
+        else:
+            mean = [0.485, 0.456, 0.406]
+            std = [0.229, 0.224, 0.225]
+
+        self.before_preprocessor_transform = tvs_trans.Compose([
+            Convert('RGB'),
+            tvs_trans.Resize(
+                pre_size, interpolation=tvs_trans.InterpolationMode.BILINEAR),
+            tvs_trans.CenterCrop(image_size),
+            tvs_trans.RandomHorizontalFlip(),
+            tvs_trans.RandomCrop(image_size, padding=4),
+        ])
+        self.after_preprocessor_transform = tvs_trans.Compose([
+            tvs_trans.ToTensor(),
+            tvs_trans.Normalize(mean=mean, std=std),
+        ])
+
+    def __call__(self, img):
+        img = self.before_preprocessor_transform(img)
+
+        w, h = img.size  # PIL .size is (width, height)
+
+        # patch area: 2%-15% of the image (note: self.area_ratio is unused here)
+        ratio_area = random.uniform(0.02, 0.15) * w * h
+
+        # sample the aspect ratio in log space
+        log_ratio = torch.log(
+            torch.tensor((self.aspect_ratio, 1 / self.aspect_ratio)))
+        aspect = torch.exp(
torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item() + + cut_w = int(round(math.sqrt(ratio_area * aspect))) + cut_h = int(round(math.sqrt(ratio_area / aspect))) + + from_location_h = int(random.uniform(0, h - cut_h)) + from_location_w = int(random.uniform(0, w - cut_w)) + + box = [ + from_location_w, from_location_h, from_location_w + cut_w, + from_location_h + cut_h + ] + patch = img.crop(box) + + # if self.colorJitter: + # patch = self.colorJitter(patch) + + to_location_h = int(random.uniform(0, h - cut_h)) + to_location_w = int(random.uniform(0, w - cut_w)) + + insert_box = [ + to_location_w, to_location_h, to_location_w + cut_w, + to_location_h + cut_h + ] + augmented = img.copy() + augmented.paste(patch, insert_box) + + img = self.after_preprocessor_transform(img) + augmented = self.after_preprocessor_transform(augmented) + + return img, augmented diff --git a/OpenOOD/openood/preprocessors/draem_preprocessor.py b/OpenOOD/openood/preprocessors/draem_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..967f0bca9b537ed252a9448cd1d3e0d6784d6b3e --- /dev/null +++ b/OpenOOD/openood/preprocessors/draem_preprocessor.py @@ -0,0 +1,313 @@ +import glob +import math +import os + +import cv2 +import imgaug.augmenters as iaa +import numpy as np +import torch + +from .base_preprocessor import BasePreprocessor + + +class DRAEMPreprocessor(BasePreprocessor): + def __init__(self, config): + self.config = config + self.args = self.config.preprocessor.preprocessor_args + + self.resize_shape = [self.args.image_size, self.args.image_size] + + self.anomaly_source_paths = sorted( + glob.glob(self.args.anomaly_source + '/*/*.jpg')) + + self.augmenters = [ + iaa.GammaContrast((0.5, 2.0), per_channel=True), + iaa.MultiplyAndAddToBrightness(mul=(0.8, 1.2), add=(-30, 30)), + iaa.pillike.EnhanceSharpness(), + iaa.AddToHueAndSaturation((-50, 50), per_channel=True), + iaa.Solarize(0.5, threshold=(32, 128)), + iaa.Posterize(), + iaa.Invert(), + iaa.pillike.Autocontrast(), + iaa.pillike.Equalize(), + iaa.Affine(rotate=(-45, 45)) + ] + + self.rot = iaa.Sequential([iaa.Affine(rotate=(-90, 90))]) + + # if config.evaluator.name == 'ood': + # assert config.use_gt == False + # if config.evaluator.name == 'draem': + # assert config.use_gt == True + + def transform_test_image(self, image_path, mask_path): + image = cv2.imread(image_path, cv2.IMREAD_COLOR) + if self.resize_shape is not None: + image = cv2.resize(image, + dsize=(self.resize_shape[1], + self.resize_shape[0])) + + image = image / 255.0 + + image = np.array(image).reshape( + (image.shape[0], image.shape[1], 3)).astype(np.float32) + + image = np.transpose(image, (2, 0, 1)) + mask = image + if self.config.use_gt: + if mask_path is not None: + mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE) + else: + mask = np.zeros((image.shape[0], image.shape[1])) + if self.resize_shape is not None: + mask = cv2.resize(mask, + dsize=(self.resize_shape[1], + self.resize_shape[0])) + mask = mask / 255.0 + mask = np.array(mask).reshape( + (mask.shape[0], mask.shape[1], 1)).astype(np.float32) + mask = np.transpose(mask, (2, 0, 1)) + + return image, mask + + def get_test_item(self, path): + sample = {} + dir_path, file_name = os.path.split(path) + base_dir = os.path.basename(dir_path) + if base_dir == 'good': + image, mask = self.transform_test_image(path, None) + else: + mask_path = os.path.join(dir_path, '../../ground_truth/') + mask_path = os.path.join(mask_path, base_dir) + mask_file_name = file_name.split('.')[0] + '_mask.png' + mask_path = 
os.path.join(mask_path, mask_file_name) + image, mask = self.transform_test_image(path, mask_path) + + if self.config.use_gt: + sample['image'] = image + sample['mask'] = mask + return sample + else: + return image + + def randAugmenter(self): + aug_ind = np.random.choice(np.arange(len(self.augmenters)), + 3, + replace=False) + aug = iaa.Sequential([ + self.augmenters[aug_ind[0]], self.augmenters[aug_ind[1]], + self.augmenters[aug_ind[2]] + ]) + return aug + + def augment_image(self, image, anomaly_source_path): + aug = self.randAugmenter() + perlin_scale = 6 + min_perlin_scale = 0 + anomaly_source_img = cv2.imread(anomaly_source_path) + + anomaly_source_img = cv2.resize(anomaly_source_img, + dsize=(self.resize_shape[1], + self.resize_shape[0])) + anomaly_img_augmented = aug(image=anomaly_source_img) + perlin_scalex = 2**(torch.randint(min_perlin_scale, perlin_scale, + (1, )).numpy()[0]) + perlin_scaley = 2**(torch.randint(min_perlin_scale, perlin_scale, + (1, )).numpy()[0]) + + perlin_noise = rand_perlin_2d_np( + (self.resize_shape[0], self.resize_shape[1]), + (perlin_scalex, perlin_scaley)) + perlin_noise = self.rot(image=perlin_noise) + threshold = 0.5 + perlin_thr = np.where(perlin_noise > threshold, + np.ones_like(perlin_noise), + np.zeros_like(perlin_noise)) + perlin_thr = np.expand_dims(perlin_thr, axis=2) + + img_thr = anomaly_img_augmented.astype(np.float32) * perlin_thr / 255.0 + + beta = torch.rand(1).numpy()[0] * 0.8 + + augmented_image = image * (1 - perlin_thr) + ( + 1 - beta) * img_thr + beta * image * (perlin_thr) + + no_anomaly = torch.rand(1).numpy()[0] + if no_anomaly > 0.5: + image = image.astype(np.float32) + return image, np.zeros_like( + perlin_thr, dtype=np.float32), np.array([0.0], + dtype=np.float32) + else: + augmented_image = augmented_image.astype(np.float32) + msk = (perlin_thr).astype(np.float32) + augmented_image = msk * augmented_image + (1 - msk) * image + has_anomaly = 1.0 + if np.sum(msk) == 0: + has_anomaly = 0.0 + return augmented_image, msk, np.array([has_anomaly], + dtype=np.float32) + + def transform_train_image(self, image_path, anomaly_source_path): + image = cv2.imread(image_path) + image = cv2.resize(image, + dsize=(self.resize_shape[1], self.resize_shape[0])) + + do_aug_orig = torch.rand(1).numpy()[0] > 0.7 + if do_aug_orig: + image = self.rot(image=image) + + image = np.array(image).reshape( + (image.shape[0], image.shape[1], image.shape[2])).astype( + np.float32) / 255.0 + augmented_image, anomaly_mask, has_anomaly = self.augment_image( + image, anomaly_source_path) + augmented_image = np.transpose(augmented_image, (2, 0, 1)) + image = np.transpose(image, (2, 0, 1)) + anomaly_mask = np.transpose(anomaly_mask, (2, 0, 1)) + return image, augmented_image, anomaly_mask, has_anomaly + + def get_train_item(self, path): + sample = {} + anomaly_source_idx = torch.randint(0, len(self.anomaly_source_paths), + (1, )).item() + image, augmented_image, anomaly_mask, has_anomaly = \ + self.transform_train_image( + path, self.anomaly_source_paths[anomaly_source_idx]) + sample['image'] = image + sample['anomaly_mask'] = anomaly_mask + sample['augmented_image'] = augmented_image + sample['has_anomaly'] = has_anomaly + + return sample + + def __call__(self, img): + if self.name.endswith('_train'): + sample = self.get_train_item(self.path) + else: + sample = self.get_test_item(self.path) + return sample + + # some setup so that the preprocessor can get the gt map + def setup(self, **kwargs): + self.path = kwargs['path'] + self.name = kwargs['name'] + + # append 
transforms that will apply after the preprocessor + def concat_transform(self, post_preprocessor_transform=None): + self.post_preprocessor_transform = post_preprocessor_transform + return self + + +def lerp_np(x, y, w): + fin_out = (y - x) * w + x + return fin_out + + +def generate_fractal_noise_2d(shape, res, octaves=1, persistence=0.5): + noise = np.zeros(shape) + frequency = 1 + amplitude = 1 + for _ in range(octaves): + noise += amplitude * generate_perlin_noise_2d( + shape, (frequency * res[0], frequency * res[1])) + frequency *= 2 + amplitude *= persistence + return noise + + +def generate_perlin_noise_2d(shape, res): + def f(t): + return 6 * t**5 - 15 * t**4 + 10 * t**3 + + delta = (res[0] / shape[0], res[1] / shape[1]) + d = (shape[0] // res[0], shape[1] // res[1]) + grid = np.mgrid[0:res[0]:delta[0], 0:res[1]:delta[1]].transpose(1, 2, + 0) % 1 + # Gradients + angles = 2 * np.pi * np.random.rand(res[0] + 1, res[1] + 1) + gradients = np.dstack((np.cos(angles), np.sin(angles))) + g00 = gradients[0:-1, 0:-1].repeat(d[0], 0).repeat(d[1], 1) + g10 = gradients[1:, 0:-1].repeat(d[0], 0).repeat(d[1], 1) + g01 = gradients[0:-1, 1:].repeat(d[0], 0).repeat(d[1], 1) + g11 = gradients[1:, 1:].repeat(d[0], 0).repeat(d[1], 1) + # Ramps + n00 = np.sum(grid * g00, 2) + n10 = np.sum(np.dstack((grid[:, :, 0] - 1, grid[:, :, 1])) * g10, 2) + n01 = np.sum(np.dstack((grid[:, :, 0], grid[:, :, 1] - 1)) * g01, 2) + n11 = np.sum(np.dstack((grid[:, :, 0] - 1, grid[:, :, 1] - 1)) * g11, 2) + # Interpolation + t = f(grid) + n0 = n00 * (1 - t[:, :, 0]) + t[:, :, 0] * n10 + n1 = n01 * (1 - t[:, :, 0]) + t[:, :, 0] * n11 + return np.sqrt(2) * ((1 - t[:, :, 1]) * n0 + t[:, :, 1] * n1) + + +def rand_perlin_2d_np(shape, + res, + fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3): + delta = (res[0] / shape[0], res[1] / shape[1]) + d = (shape[0] // res[0], shape[1] // res[1]) + grid = np.mgrid[0:res[0]:delta[0], 0:res[1]:delta[1]].transpose(1, 2, + 0) % 1 + + angles = 2 * math.pi * np.random.rand(res[0] + 1, res[1] + 1) + gradients = np.stack((np.cos(angles), np.sin(angles)), axis=-1) + + tile_grads = lambda slice1, slice2: np.repeat(np.repeat( + gradients[slice1[0]:slice1[1], slice2[0]:slice2[1]], d[0], axis=0), + d[1], + axis=1) + dot = lambda grad, shift: (np.stack( + (grid[:shape[0], :shape[1], 0] + shift[0], grid[:shape[0], :shape[1], 1 + ] + shift[1]), + axis=-1) * grad[:shape[0], :shape[1]]).sum(axis=-1) + + n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0]) + n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0]) + n01 = dot(tile_grads([0, -1], [1, None]), [0, -1]) + n11 = dot(tile_grads([1, None], [1, None]), [-1, -1]) + t = fade(grid[:shape[0], :shape[1]]) + return math.sqrt(2) * lerp_np(lerp_np(n00, n10, t[..., 0]), + lerp_np(n01, n11, t[..., 0]), t[..., 1]) + + +def rand_perlin_2d(shape, + res, + fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3): + delta = (res[0] / shape[0], res[1] / shape[1]) + d = (shape[0] // res[0], shape[1] // res[1]) + + grid = torch.stack(torch.meshgrid(torch.arange(0, res[0], delta[0]), + torch.arange(0, res[1], delta[1])), + dim=-1) % 1 + angles = 2 * math.pi * torch.rand(res[0] + 1, res[1] + 1) + gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1) + + tile_grads = lambda slice1, slice2: gradients[slice1[0]:slice1[1], slice2[ + 0]:slice2[1]].repeat_interleave(d[0], 0).repeat_interleave(d[1], 1) + dot = lambda grad, shift: (torch.stack( + (grid[:shape[0], :shape[1], 0] + shift[0], grid[:shape[0], :shape[1], 1 + ] + shift[1]), + dim=-1) * grad[:shape[0], 
:shape[1]]).sum(dim=-1) + + n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0]) + + n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0]) + n01 = dot(tile_grads([0, -1], [1, None]), [0, -1]) + n11 = dot(tile_grads([1, None], [1, None]), [-1, -1]) + t = fade(grid[:shape[0], :shape[1]]) + return math.sqrt(2) * torch.lerp(torch.lerp( + n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1]) + + +def rand_perlin_2d_octaves(shape, res, octaves=1, persistence=0.5): + noise = torch.zeros(shape) + frequency = 1 + amplitude = 1 + for _ in range(octaves): + noise += amplitude * rand_perlin_2d( + shape, (frequency * res[0], frequency * res[1])) + frequency *= 2 + amplitude *= persistence + return noise diff --git a/OpenOOD/openood/preprocessors/pixmix_preprocessor.py b/OpenOOD/openood/preprocessors/pixmix_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..6a370210ca78a1b24467464052d40092f1412d69 --- /dev/null +++ b/OpenOOD/openood/preprocessors/pixmix_preprocessor.py @@ -0,0 +1,313 @@ +import os + +import numpy as np +import torch +import torchvision.transforms as tvs_trans +from PIL import Image as Image +from PIL import ImageEnhance, ImageOps + +from .base_preprocessor import BasePreprocessor +from .transform import Convert, interpolation_modes, normalization_dict + +resize_list = { + 'osr': 32, + 'mnist': 32, + 'cifar10': 36, + 'cifar100': 36, + 'tin': 72, + 'imagenet': 256, + 'imagenet200': 256, + 'aircraft': 512, + 'cub': 512, +} # mnist entry set by us; the PixMix authors used 224 for imagenet, but we use 256 here + + +class PixMixPreprocessor(BasePreprocessor): + def __init__(self, config): + self.pre_size = config.dataset.pre_size + self.dataset_name = config.dataset.name.split('_')[0] + self.image_size = config.dataset.image_size + self.interpolation = interpolation_modes[config.dataset.interpolation] + normalization_type = config.dataset.normalization_type + if normalization_type in normalization_dict.keys(): + self.mean = normalization_dict[normalization_type][0] + self.std = normalization_dict[normalization_type][1] + else: + self.mean = [0.5, 0.5, 0.5] + self.std = [0.5, 0.5, 0.5] + self.normalize = tvs_trans.Normalize(self.mean, self.std) + self.tensorize = tvs_trans.ToTensor() + + self.args = config.preprocessor.preprocessor_args + + if 'imagenet' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.RandomResizedCrop(self.image_size, + interpolation=self.interpolation), + tvs_trans.RandomHorizontalFlip(0.5), + ]) + elif 'aircraft' in config.dataset.name or 'cub' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + tvs_trans.RandomCrop(self.image_size), + tvs_trans.RandomHorizontalFlip(), + ]) + else: + self.transform = tvs_trans.Compose([ + Convert('RGB'), + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + tvs_trans.CenterCrop(self.image_size), + tvs_trans.RandomHorizontalFlip(), + tvs_trans.RandomCrop(self.image_size, padding=4), + ]) + + self.mixing_set_transform = tvs_trans.Compose([ + tvs_trans.Resize(resize_list[self.dataset_name]), + tvs_trans.RandomCrop(self.image_size) + ]) + + with open(self.args.mixing_set_dir, 'r') as f: + self.mixing_list = f.readlines() + + def __call__(self, image): + # TODO: consider fixing a random seed here for reproducibility
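# What follows: draw a random entry from the mixing-set file list, load that image from the data root, and blend it with the input via pixmix() below (augment, then up to k random mixing rounds, clip to [0, 1], normalize).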
+ rnd_idx = np.random.choice(len(self.mixing_list)) + mixing_pic_dir = self.mixing_list[rnd_idx].strip('\n') + + mixing_pic = Image.open( + os.path.join('./data/images_classic/', + mixing_pic_dir)).convert('RGB') + return self.pixmix(image, mixing_pic) + + def augment_input(self, image): + aug_list = augmentations_all if self.args.all_ops else augmentations + op = np.random.choice(aug_list) + return op(image.copy(), self.args.aug_severity, self.image_size) + + def pixmix(self, orig, mixing_pic): + mixings = [add, multiply] + orig = self.transform(orig) + + # do basic augmentation first + mixing_pic = self.mixing_set_transform(mixing_pic) + + if np.random.random() < 0.5: + mixed = self.tensorize(self.augment_input(orig)) + else: + mixed = self.tensorize(orig) + + for _ in range(np.random.randint(self.args.k + 1)): + + if np.random.random() < 0.5: + aug_image_copy = self.tensorize(self.augment_input(orig)) + else: + aug_image_copy = self.tensorize(mixing_pic) + + mixed_op = np.random.choice(mixings) + + mixed = mixed_op(mixed, aug_image_copy, self.args.beta) + mixed = torch.clip(mixed, 0, 1) + + return self.normalize(mixed) + + +"""Base augmentations operators.""" + +######################################################### +#################### AUGMENTATIONS ###################### +######################################################### + + +def int_parameter(level, maxval): + """Helper function to scale `level` between 0 and maxval. + + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled to + level/PARAMETER_MAX. + Returns: + An int that results from scaling `maxval` according to `level`. + """ + return int(level * maxval / 10) + + +def float_parameter(level, maxval): + """Helper function to scale `level` between 0 and maxval. + + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled to + level/PARAMETER_MAX. + Returns: + A float that results from scaling `maxval` according to `level`. + """ + return float(level) * maxval / 10.
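As a quick, illustrative sanity check of the two helpers above (an editor's sketch, not part of the original file): a severity `level` lives on a 0-10 scale and is mapped linearly onto [0, maxval].

# given the definitions of int_parameter / float_parameter above:
# level 10 is full severity, level 5 is half severity
assert int_parameter(10, 30) == 30
assert int_parameter(5, 30) == 15
assert abs(float_parameter(5, 0.3) - 0.15) < 1e-9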
+ + +def sample_level(n): + return np.random.uniform(low=0.1, high=n) + + +def autocontrast(pil_img, _, IMAGE_SIZE): + return ImageOps.autocontrast(pil_img) + + +def equalize(pil_img, _, IMAGE_SIZE): + return ImageOps.equalize(pil_img) + + +def posterize(pil_img, level, IMAGE_SIZE): + level = int_parameter(sample_level(level), 4) + return ImageOps.posterize(pil_img, 4 - level) + + +def rotate(pil_img, level, IMAGE_SIZE): + degrees = int_parameter(sample_level(level), 30) + if np.random.uniform() > 0.5: + degrees = -degrees + return pil_img.rotate(degrees, resample=Image.BILINEAR) + + +def solarize(pil_img, level, IMAGE_SIZE): + level = int_parameter(sample_level(level), 256) + return ImageOps.solarize(pil_img, 256 - level) + + +def shear_x(pil_img, level, IMAGE_SIZE): + level = float_parameter(sample_level(level), 0.3) + if np.random.uniform() > 0.5: + level = -level + return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), + Image.AFFINE, (1, level, 0, 0, 1, 0), + resample=Image.BILINEAR) + + +def shear_y(pil_img, level, IMAGE_SIZE): + level = float_parameter(sample_level(level), 0.3) + if np.random.uniform() > 0.5: + level = -level + return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), + Image.AFFINE, (1, 0, 0, level, 1, 0), + resample=Image.BILINEAR) + + +def translate_x(pil_img, level, IMAGE_SIZE): + level = int_parameter(sample_level(level), IMAGE_SIZE / 3) + if np.random.random() > 0.5: + level = -level + return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), + Image.AFFINE, (1, 0, level, 0, 1, 0), + resample=Image.BILINEAR) + + +def translate_y(pil_img, level, IMAGE_SIZE): + level = int_parameter(sample_level(level), IMAGE_SIZE / 3) + if np.random.random() > 0.5: + level = -level + return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), + Image.AFFINE, (1, 0, 0, 0, 1, level), + resample=Image.BILINEAR) + + +# operation that overlaps with ImageNet-C's test set +def color(pil_img, level, IMAGE_SIZE): + level = float_parameter(sample_level(level), 1.8) + 0.1 + return ImageEnhance.Color(pil_img).enhance(level) + + +# operation that overlaps with ImageNet-C's test set +def contrast(pil_img, level, IMAGE_SIZE): + level = float_parameter(sample_level(level), 1.8) + 0.1 + return ImageEnhance.Contrast(pil_img).enhance(level) + + +# operation that overlaps with ImageNet-C's test set +def brightness(pil_img, level, IMAGE_SIZE): + level = float_parameter(sample_level(level), 1.8) + 0.1 + return ImageEnhance.Brightness(pil_img).enhance(level) + + +# operation that overlaps with ImageNet-C's test set +def sharpness(pil_img, level, IMAGE_SIZE): + level = float_parameter(sample_level(level), 1.8) + 0.1 + return ImageEnhance.Sharpness(pil_img).enhance(level) + + +augmentations = [ + autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y, + translate_x, translate_y +] + +augmentations_all = [ + autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y, + translate_x, translate_y, color, contrast, brightness, sharpness +] + +######################################################### +######################## MIXINGS ######################## +######################################################### + + +def get_ab(beta): + if np.random.random() < 0.5: + a = np.float32(np.random.beta(beta, 1)) + b = np.float32(np.random.beta(1, beta)) + else: + a = 1 + np.float32(np.random.beta(1, beta)) + b = -np.float32(np.random.beta(1, beta)) + return a, b + + +def add(img1, img2, beta): + a, b = get_ab(beta) + img1, img2 = img1 * 2 - 1, img2 * 2 - 1 + out = a * img1 + b * img2 + return (out + 1) / 2 + + 
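To make the additive mixing concrete, a small hedged sketch (the values of a and b are illustrative; in practice they are random draws from get_ab(beta)): inputs are shifted from [0, 1] to [-1, 1], combined linearly, and shifted back, with the caller (pixmix above) clipping the result to [0, 1].

import torch
img1 = torch.full((3, 4, 4), 0.8)  # toy images in [0, 1]
img2 = torch.full((3, 4, 4), 0.2)
a, b = 1.2, -0.3  # one plausible draw from get_ab(beta)
out = (a * (img1 * 2 - 1) + b * (img2 * 2 - 1) + 1) / 2
# here out == 0.95 everywhere; values outside [0, 1] are clipped by the caller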
+def multiply(img1, img2, beta): + a, b = get_ab(beta) + img1, img2 = img1 * 2, img2 * 2 + out = (img1**a) * (img2.clip(1e-37)**b) + return out / 2 + + +######################################## +##### EXTRA MIXINGS (EXPERIMENTAL) ##### +######################################## + + +def invert(img): + return 1 - img + + +def screen(img1, img2, beta): + img1, img2 = invert(img1), invert(img2) + out = multiply(img1, img2, beta) + return invert(out) + + +def overlay(img1, img2, beta): + case1 = multiply(img1, img2, beta) + case2 = screen(img1, img2, beta) + if np.random.random() < 0.5: + cond = img1 < 0.5 + else: + cond = img1 > 0.5 + return torch.where(cond, case1, case2) + + +def darken_or_lighten(img1, img2, beta): + if np.random.random() < 0.5: + cond = img1 < img2 + else: + cond = img1 > img2 + return torch.where(cond, img1, img2) + + +def swap_channel(img1, img2, beta): + channel = np.random.randint(3) + img1[channel] = img2[channel] + return img1 diff --git a/OpenOOD/openood/preprocessors/randaugment_preprocessor.py b/OpenOOD/openood/preprocessors/randaugment_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..ab6e391ba227a3d712d375721f1771311eb32097 --- /dev/null +++ b/OpenOOD/openood/preprocessors/randaugment_preprocessor.py @@ -0,0 +1,66 @@ +import torchvision.transforms as tvs_trans + +from openood.utils.config import Config + +from .transform import Convert, interpolation_modes, normalization_dict + + +class RandAugmentPreprocessor(): + def __init__(self, config: Config): + self.pre_size = config.dataset.pre_size + self.image_size = config.dataset.image_size + self.interpolation = interpolation_modes[config.dataset.interpolation] + normalization_type = config.dataset.normalization_type + if normalization_type in normalization_dict.keys(): + self.mean = normalization_dict[normalization_type][0] + self.std = normalization_dict[normalization_type][1] + else: + self.mean = [0.5, 0.5, 0.5] + self.std = [0.5, 0.5, 0.5] + + self.n = config.preprocessor.n + self.m = config.preprocessor.m + + if 'imagenet' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.RandomResizedCrop(self.image_size, + interpolation=self.interpolation), + tvs_trans.RandomHorizontalFlip(0.5), + tvs_trans.RandAugment(num_ops=self.n, + magnitude=self.m, + interpolation=self.interpolation), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + elif 'aircraft' in config.dataset.name or 'cub' in config.dataset.name: + self.transform = tvs_trans.Compose([ + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + tvs_trans.RandomCrop(self.image_size), + tvs_trans.RandomHorizontalFlip(), + tvs_trans.RandAugment(num_ops=self.n, + magnitude=self.m, + interpolation=self.interpolation), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + else: + self.transform = tvs_trans.Compose([ + Convert('RGB'), + tvs_trans.RandAugment(num_ops=self.n, + magnitude=self.m, + interpolation=self.interpolation), + tvs_trans.Resize(self.pre_size, + interpolation=self.interpolation), + tvs_trans.CenterCrop(self.image_size), + tvs_trans.RandomHorizontalFlip(), + tvs_trans.RandomCrop(self.image_size, padding=4), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) + + def setup(self, **kwargs): + pass + + def __call__(self, image): + return self.transform(image) diff --git a/OpenOOD/openood/preprocessors/test_preprocessor.py b/OpenOOD/openood/preprocessors/test_preprocessor.py new file mode
100644 index 0000000000000000000000000000000000000000..dd9252482292b135916eb6f0b5cf31d8e40aadfd --- /dev/null +++ b/OpenOOD/openood/preprocessors/test_preprocessor.py @@ -0,0 +1,27 @@ +import torchvision.transforms as tvs_trans + +from openood.utils.config import Config + +from .base_preprocessor import BasePreprocessor +from .transform import Convert + + +class TestStandardPreProcessor(BasePreprocessor): + """For test and validation dataset standard image transformation.""" + def __init__(self, config: Config): + super(TestStandardPreProcessor, self).__init__(config) + + # self.transform = tvs_trans.Compose([ + # Convert('RGB'), + # tvs_trans.Resize(self.pre_size, interpolation=self.interpolation), + # tvs_trans.CenterCrop(self.image_size), + # tvs_trans.ToTensor(), + # tvs_trans.Normalize(mean=self.mean, std=self.std), + # ]) + self.transform = tvs_trans.Compose([ + Convert('RGB'), + tvs_trans.Resize((self.pre_size, self.pre_size)), + # tvs_trans.CenterCrop(self.image_size), + tvs_trans.ToTensor(), + tvs_trans.Normalize(mean=self.mean, std=self.std), + ]) diff --git a/OpenOOD/openood/preprocessors/transform.py b/OpenOOD/openood/preprocessors/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..0eba6e2dbadd28a57e434c07d30fb8f83c0e4a98 --- /dev/null +++ b/OpenOOD/openood/preprocessors/transform.py @@ -0,0 +1,28 @@ +import torchvision.transforms as tvs_trans + +normalization_dict = { + 'cifar10': [[0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]], + 'cifar100': [[0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]], + 'imagenet': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]], + 'imagenet200': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]], + 'covid': [[0.4907, 0.4907, 0.4907], [0.2697, 0.2697, 0.2697]], + 'aircraft': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], + 'cub': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], + 'cars': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], +} + +interpolation_modes = { + 'nearest': tvs_trans.InterpolationMode.NEAREST, + 'bilinear': tvs_trans.InterpolationMode.BILINEAR, +} + + +class Convert: + def __init__(self, mode='RGB'): + self.mode = mode + + def __call__(self, image): + return image.convert(self.mode) + + +# More transform classes shall be written here diff --git a/OpenOOD/openood/preprocessors/utils.py b/OpenOOD/openood/preprocessors/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1bbd0633df244a0d0700eda3a62239db764e541e --- /dev/null +++ b/OpenOOD/openood/preprocessors/utils.py @@ -0,0 +1,39 @@ +from openood.utils import Config + +from .base_preprocessor import BasePreprocessor +from .cider_preprocessor import CiderPreprocessor +from .csi_preprocessor import CSIPreprocessor +from .cutpaste_preprocessor import CutPastePreprocessor +from .draem_preprocessor import DRAEMPreprocessor +from .augmix_preprocessor import AugMixPreprocessor +from .pixmix_preprocessor import PixMixPreprocessor +from .randaugment_preprocessor import RandAugmentPreprocessor +from .cutout_preprocessor import CutoutPreprocessor +from .test_preprocessor import TestStandardPreProcessor + + +def get_preprocessor(config: Config, split): + train_preprocessors = { + 'base': BasePreprocessor, + 'draem': DRAEMPreprocessor, + 'cutpaste': CutPastePreprocessor, + 'augmix': AugMixPreprocessor, + 'pixmix': PixMixPreprocessor, + 'randaugment': RandAugmentPreprocessor, + 'cutout': CutoutPreprocessor, + 'csi': CSIPreprocessor, + 'cider': CiderPreprocessor, + } + test_preprocessors = { + 'base': TestStandardPreProcessor, + 'draem': DRAEMPreprocessor, + 'cutpaste': 
CutPastePreprocessor, + } + + if split == 'train': + return train_preprocessors[config.preprocessor.name](config) + else: + try: + return test_preprocessors[config.preprocessor.name](config) + except KeyError: + return test_preprocessors['base'](config) diff --git a/OpenOOD/openood/recorders/__init__.py b/OpenOOD/openood/recorders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc99f09bbdb58d22c6e15ca66579a411319072f --- /dev/null +++ b/OpenOOD/openood/recorders/__init__.py @@ -0,0 +1 @@ +from .utils import get_recorder diff --git
a/OpenOOD/openood/recorders/ad_recorder.py b/OpenOOD/openood/recorders/ad_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..73894c1dbc9fbd011a55d7f0dbf7cd6dd43c633b --- /dev/null +++ b/OpenOOD/openood/recorders/ad_recorder.py @@ -0,0 +1,59 @@ +import os +import time +from pathlib import Path + +import torch + +from .base_recorder import BaseRecorder + + +class ADRecorder(BaseRecorder): + def __init__(self, config) -> None: + super(ADRecorder, self).__init__(config) + + self.best_epoch_idx = 0 + self.best_result = 0 + + self.begin_time = time.time() + + def report(self, train_metrics, test_metrics): + print('Epoch {:03d} | Time {:5d}s | Train Loss {:.4f} | ' + 'Auroc {:.4f}\n'.format(train_metrics['epoch_idx'], + int(time.time() - self.begin_time), + train_metrics['loss'], + 100.0 * test_metrics['image_auroc']), + flush=True) + + def save_model(self, net, test_metrics): + if self.config.recorder.save_all_models: + torch.save( + net.state_dict(), + os.path.join( + self.output_dir, + 'model_epoch{}.ckpt'.format(test_metrics['epoch_idx']))) + + # enter only if a better AUROC occurs + if test_metrics['image_auroc'] >= self.best_result: + + # delete the previous best checkpoint + old_fname = 'best_epoch{}_auroc{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_result)
+ old_pth = os.path.join(self.output_dir, old_fname) + Path(old_pth).unlink(missing_ok=True) + + # update the best model + self.best_epoch_idx = test_metrics['epoch_idx'] + self.best_result = test_metrics['image_auroc'] + torch.save(net.state_dict(), + os.path.join(self.output_dir, 'best.ckpt')) + + save_fname = 'best_epoch{}_auroc{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_result) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(net.state_dict(), save_pth) + + def summary(self): + print('Training Completed!\n ' + 'Best Auroc: {:.4f} at epoch {:d}\n'.format( + 100.0 * self.best_result, self.best_epoch_idx), + flush=True) diff --git a/OpenOOD/openood/recorders/arpl_recorder.py b/OpenOOD/openood/recorders/arpl_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..5e7a4291cc8d527fe5c269dc9d2e67f2b4cc7a97 --- /dev/null +++ b/OpenOOD/openood/recorders/arpl_recorder.py @@ -0,0 +1,65 @@ +import copy +import os +import time + +import torch + +from .base_recorder import BaseRecorder + + +class ARPLRecorder(BaseRecorder): + def __init__(self, config) -> None: + super().__init__(config) + + def report(self, train_metrics, val_metrics): + if 'lossD' in train_metrics.keys(): + print('\nEpoch {:03d} | Time {:5d}s | D Loss {:.4f} | ' + 'G Loss {:.4f} | Train Loss {:.4f} | ' + 'Val Loss {:.3f} | Val Acc {:.2f}'.format( + (train_metrics['epoch_idx']), + int(time.time() - self.begin_time), + train_metrics['lossD'], train_metrics['lossG'], + train_metrics['loss'], val_metrics['loss'], + 100.0 * val_metrics['acc']), + flush=True) + else: + print('\nEpoch {:03d} | Time {:5d}s | Train Loss {:.4f} | ' + 'Val Loss {:.3f} | Val Acc {:.2f}'.format( + (train_metrics['epoch_idx']), + int(time.time() - self.begin_time), + train_metrics['loss'], val_metrics['loss'], + 100.0 * val_metrics['acc']), + flush=True) + + def save_model(self, net, val_metrics): + + netF = net['netF'] + criterion = net['criterion'] + epoch_idx = val_metrics['epoch_idx'] + + try: + netF_wts = copy.deepcopy(netF.module.state_dict()) + criterion_wts = copy.deepcopy(criterion.module.state_dict()) + except AttributeError: + netF_wts = copy.deepcopy(netF.state_dict()) + criterion_wts = copy.deepcopy(criterion.state_dict()) + + if self.config.recorder.save_all_models: + save_pth = os.path.join(self.save_dir, + 'epoch-{}_NetF.ckpt'.format(epoch_idx)) + torch.save(netF_wts, save_pth) + save_pth = os.path.join( + self.save_dir, 'epoch-{}_criterion.ckpt'.format(epoch_idx)) + torch.save(criterion_wts, save_pth) + + # enter only if better accuracy occurs + if val_metrics['acc'] >= self.best_acc: + + # update the best model + self.best_epoch_idx = val_metrics['epoch_idx'] + self.best_acc = val_metrics['acc'] + + torch.save(netF_wts, os.path.join(self.output_dir, + 'best_NetF.ckpt')) + torch.save(criterion_wts, + os.path.join(self.output_dir, 'best_criterion.ckpt')) diff --git a/OpenOOD/openood/recorders/base_recorder.py b/OpenOOD/openood/recorders/base_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..d381b14422a8d0dc3a349eba20996bd564af9fcd --- /dev/null +++ b/OpenOOD/openood/recorders/base_recorder.py @@ -0,0 +1,68 @@ +import os +import time +from pathlib import Path + +import torch + + +class BaseRecorder: + def __init__(self, config) -> None: + self.config = config + + self.best_acc = 0.0 + self.best_epoch_idx = 0 + + self.begin_time = time.time() + self.output_dir = config.output_dir + + def report(self, train_metrics, val_metrics): + print('\nEpoch {:03d} | Time 
{:5d}s | Train Loss {:.4f} | ' + 'Val Loss {:.3f} | Val Acc {:.2f}'.format( + (train_metrics['epoch_idx']), + int(time.time() - self.begin_time), train_metrics['loss'], + val_metrics['loss'], 100.0 * val_metrics['acc']), + flush=True) + + def save_model(self, net, val_metrics): + try: + state_dict = net.module.state_dict() + except AttributeError: + state_dict = net.state_dict() + + if self.config.recorder.save_all_models: + torch.save( + state_dict, + os.path.join( + self.output_dir, + 'model_epoch{}.ckpt'.format(val_metrics['epoch_idx']))) + + # enter only if better accuracy occurs + if val_metrics['acc'] >= self.best_acc: + # delete the previous best checkpoint + old_fname = 'best_epoch{}_acc{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_acc) + old_pth = os.path.join(self.output_dir, old_fname) + Path(old_pth).unlink(missing_ok=True) + + # update the best model + self.best_epoch_idx = val_metrics['epoch_idx'] + self.best_acc = val_metrics['acc'] + torch.save(state_dict, os.path.join(self.output_dir, 'best.ckpt')) + + save_fname = 'best_epoch{}_acc{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_acc) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(state_dict, save_pth) + + # save last path + if val_metrics['epoch_idx'] == self.config.optimizer.num_epochs: + save_fname = 'last_epoch{}_acc{:.4f}.ckpt'.format( + val_metrics['epoch_idx'], val_metrics['acc']) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(state_dict, save_pth) + + def summary(self): + print('Training Completed! ' + 'Best accuracy: {:.2f} ' + 'at epoch {:d}'.format(100 * self.best_acc, self.best_epoch_idx), + flush=True) diff --git a/OpenOOD/openood/recorders/cider_recorder.py b/OpenOOD/openood/recorders/cider_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..aca350afcf00cb01af614cbfcf19193279ba1c01 --- /dev/null +++ b/OpenOOD/openood/recorders/cider_recorder.py @@ -0,0 +1,66 @@ +import os +import time +from pathlib import Path + +import torch + + +class CiderRecorder: + def __init__(self, config) -> None: + self.config = config + + self.best_loss = float('inf') + self.best_epoch_idx = 0 + + self.begin_time = time.time() + self.output_dir = config.output_dir + + def report(self, train_metrics, val_metrics): + print('\nEpoch {:03d} | Time {:5d}s | Train Loss {:.4f}'.format( + (train_metrics['epoch_idx']), int(time.time() - self.begin_time), + train_metrics['loss']), + flush=True) + + def save_model(self, net, train_metrics): + try: + state_dict = net.module.state_dict() + except AttributeError: + state_dict = net.state_dict() + + if self.config.recorder.save_all_models: + torch.save( + state_dict, + os.path.join( + self.output_dir, + 'model_epoch{}.ckpt'.format(train_metrics['epoch_idx']))) + + # enter only if a lower loss occurs + if train_metrics['loss'] <= self.best_loss: + # delete the previous best checkpoint + old_fname = 'best_epoch{}_loss{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_loss) + old_pth = os.path.join(self.output_dir, old_fname) + Path(old_pth).unlink(missing_ok=True) + + # update the best model + self.best_epoch_idx = train_metrics['epoch_idx'] + self.best_loss = train_metrics['loss'] + torch.save(state_dict, os.path.join(self.output_dir, 'best.ckpt')) + + save_fname = 'best_epoch{}_loss{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_loss) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(state_dict, save_pth) + + # save last path + if train_metrics['epoch_idx'] ==
self.config.optimizer.num_epochs: + save_fname = 'last_epoch{}_loss{:.4f}.ckpt'.format( + train_metrics['epoch_idx'], train_metrics['loss']) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(state_dict, save_pth) + + def summary(self): + print('Training Completed! ' + 'Best loss: {:.4f} ' + 'at epoch {:d}'.format(self.best_loss, self.best_epoch_idx), + flush=True) diff --git a/OpenOOD/openood/recorders/cutpaste_recorder.py b/OpenOOD/openood/recorders/cutpaste_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..7a1030cfabe366b7c08d3c734bbb346f3637de0c --- /dev/null +++ b/OpenOOD/openood/recorders/cutpaste_recorder.py @@ -0,0 +1,58 @@ +import os +import time +from pathlib import Path + +import torch + + +class CutpasteRecorder: + def __init__(self, config) -> None: + self.config = config + + self.best_auroc = 0.0 + self.best_epoch_idx = 0 + + self.begin_time = time.time() + self.output_dir = config.output_dir + + def report(self, train_metrics, val_metrics): + print('\nEpoch {:03d} | Time {:5d}s | Train Loss {:.4f} | ' + 'AUROC {:.3f}'.format((val_metrics['epoch_idx']), + int(time.time() - self.begin_time), + train_metrics['loss'], + val_metrics['image_auroc']), + flush=True) + + def save_model(self, net, val_metrics): + if self.config.recorder.save_all_models: + torch.save( + net.state_dict(), + os.path.join( + self.output_dir, + 'model_epoch{}.ckpt'.format(val_metrics['epoch_idx']))) + + # enter only if a better AUROC occurs + if val_metrics['image_auroc'] >= self.best_auroc: + + # delete the previous best checkpoint + old_fname = 'best_epoch{}_auroc{}.ckpt'.format( + self.best_epoch_idx, self.best_auroc) + old_pth = os.path.join(self.output_dir, old_fname) + Path(old_pth).unlink(missing_ok=True) + + # update the best model + self.best_epoch_idx = val_metrics['epoch_idx'] + self.best_auroc = val_metrics['image_auroc'] + torch.save(net.state_dict(), + os.path.join(self.output_dir, 'best.ckpt')) + + save_fname = 'best_epoch{}_auroc{}.ckpt'.format( + self.best_epoch_idx, self.best_auroc) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(net.state_dict(), save_pth) + + def summary(self): + print('Training Completed!
' + 'Best auroc: {:.2f} ' + 'at epoch {:d}'.format(self.best_auroc, self.best_epoch_idx), + flush=True) diff --git a/OpenOOD/openood/recorders/draem_recorder.py b/OpenOOD/openood/recorders/draem_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..4b2fe0fdbe9e4a8dbcb56c9b7c24b412f293334e --- /dev/null +++ b/OpenOOD/openood/recorders/draem_recorder.py @@ -0,0 +1,56 @@ +import os +from pathlib import Path + +import torch + +from .ad_recorder import ADRecorder + + +class DRAEMRecorder(ADRecorder): + def __init__(self, config) -> None: + super(DRAEMRecorder, self).__init__(config) + + self.best_model_basis = self.config.recorder.best_model_basis + + self.run_name = ('draem_test_' + str(self.config.optimizer.lr) + '_' + + str(self.config.optimizer.num_epochs) + '_bs' + + str(self.config.dataset.train.batch_size) + '_' + + self.config.dataset.name) + + def save_model(self, net, test_metrics): + if self.config.recorder.save_all_models: + + save_fname = self.run_name + '_model_epoch{}'.format( + test_metrics['epoch_idx']) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(net['generative'].state_dict(), save_pth + '.ckpt') + torch.save(net['discriminative'].state_dict(), + save_pth + '_seg.ckpt') + + # enter only if the monitored metric improves + if test_metrics[self.best_model_basis] >= self.best_result: + + # delete the previous best checkpoint + old_fname = self.run_name + '_best_epoch{}_loss{:.4f}'.format( + self.best_epoch_idx, self.best_result) + old_pth = os.path.join(self.output_dir, old_fname) + Path(old_pth + '.ckpt').unlink(missing_ok=True) + Path(old_pth + '_seg.ckpt').unlink(missing_ok=True) + + # update the best model + self.best_epoch_idx = test_metrics['epoch_idx'] + self.best_result = test_metrics[self.best_model_basis] + + save_fname = self.run_name + '_best_epoch{}_loss{:.4f}'.format( + self.best_epoch_idx, self.best_result) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(net['generative'].state_dict(), save_pth + '.ckpt') + torch.save(net['discriminative'].state_dict(), + save_pth + '_seg.ckpt') + + if test_metrics['epoch_idx'] == self.config.optimizer.num_epochs: + save_fname = self.run_name + '_latest_checkpoint' + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(net['generative'].state_dict(), save_pth + '.ckpt') + torch.save(net['discriminative'].state_dict(), + save_pth + '_seg.ckpt') diff --git a/OpenOOD/openood/recorders/dsvdd_recorder.py b/OpenOOD/openood/recorders/dsvdd_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..3d5a0399819c018585e597d62279d21cdc2092a3 --- /dev/null +++ b/OpenOOD/openood/recorders/dsvdd_recorder.py @@ -0,0 +1,85 @@ +import os +import time +from pathlib import Path + +import torch + + +class DCAERecorder: + def __init__(self, config) -> None: + self.config = config + self.output_dir = config.output_dir + self.best_roc_auc = 0.0 + self.best_epoch_idx = 0 + self.begin_time = time.time() + + def report(self, train_metrics, test_metrics): + print('epoch [{}],time:{:5d}s,loss:{:.4f},roc_auc:{:.2f}'.format( + train_metrics['epoch_idx'], int(time.time() - self.begin_time), + train_metrics['epoch_loss'], test_metrics['roc_auc'])) + + def save_model(self, net, test_metrics): + + # enter only if a better ROC-AUC occurs + if test_metrics['roc_auc'] >= self.best_roc_auc: + + # delete the previous best checkpoint + old_fname = 'AE_best_epoch{}_roc_auc{}.pth'.format( + self.best_epoch_idx, self.best_roc_auc) + old_pth = os.path.join(self.output_dir, old_fname) +
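# Path.unlink(missing_ok=True) removes the stale file and is a no-op if it does not exist (Python 3.8+)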
Path(old_pth).unlink(missing_ok=True) + + # update the best model + self.best_epoch_idx = test_metrics['epoch_idx'] + self.best_roc_auc = test_metrics['roc_auc'] + save_fname = 'AE_best_epoch{}_roc_auc{}.pth'.format( + self.best_epoch_idx, self.best_roc_auc) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(net.state_dict(), save_pth) + + def summary(self): + print('Training Completed! ' + 'Best Roc_auc: {:.2f}%,' + 'at epoch {:d}'.format(100 * self.best_roc_auc, + self.best_epoch_idx), + flush=True) + + +class DSVDDRecorder: + def __init__(self, config) -> None: + self.config = config + self.output_dir = config.output_dir + self.best_roc_auc = 0.0 + self.best_epoch_idx = 0 + self.begin_time = time.time() + + def report(self, train_metrics, test_metrics): + print('epoch [{}],time:{:5d}s,loss:{:.4f},roc_auc:{:.2f}'.format( + train_metrics['epoch_idx'], int(time.time() - self.begin_time), + train_metrics['epoch_loss'], test_metrics['roc_auc'])) + + def save_model(self, net, test_metrics): + + # enter only if a better ROC-AUC occurs + if test_metrics['roc_auc'] >= self.best_roc_auc: + + # delete the previous best checkpoint + old_fname = 'DSVDD_best_epoch{}_roc_auc{}.pth'.format( + self.best_epoch_idx, self.best_roc_auc) + old_pth = os.path.join(self.output_dir, old_fname) + Path(old_pth).unlink(missing_ok=True) + + # update the best model + self.best_epoch_idx = test_metrics['epoch_idx'] + self.best_roc_auc = test_metrics['roc_auc'] + save_fname = 'DSVDD_best_epoch{}_roc_auc{}.pth'.format( + self.best_epoch_idx, self.best_roc_auc) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(net.state_dict(), save_pth) + + def summary(self): + print('Training Completed! ' + 'Best Roc_auc: {:.2f}%,' + 'at epoch {:d}'.format(100 * self.best_roc_auc, + self.best_epoch_idx), + flush=True) diff --git a/OpenOOD/openood/recorders/kdad_recorder.py b/OpenOOD/openood/recorders/kdad_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..03412aaad9ae3c6733bb1aaad3ee3a4ec42f1394 --- /dev/null +++ b/OpenOOD/openood/recorders/kdad_recorder.py @@ -0,0 +1,57 @@ +import os +import time +from pathlib import Path + +import torch + + +class KdadRecorder: + def __init__(self, config) -> None: + self.config = config + self.output_dir = config.output_dir + self.best_roc_auc = 0.0 + self.best_epoch_idx = 0 + self.begin_time = time.time() + + def report(self, train_metrics, test_metrics): + print('epoch [{}],time:{:5d}s,loss:{:.4f},roc_auc:{:.2f}'.format( + train_metrics['epoch_idx'], int(time.time() - self.begin_time), + train_metrics['epoch_loss'], test_metrics['roc_auc'])) + + def save_model(self, net, test_metrics): + if self.config.recorder.save_all_models: + torch.save( + net['model'].state_dict(), + os.path.join( + self.output_dir, + 'Clone_epoch{}.ckpt'.format(test_metrics['epoch_idx']))) + + # enter only if a better ROC-AUC occurs + if test_metrics['roc_auc'] >= self.best_roc_auc: + + # delete the previous best checkpoint + old_fname = 'Clone_best_epoch{}_roc_auc{}.pth'.format( + self.best_epoch_idx, self.best_roc_auc) + old_pth = os.path.join(self.output_dir, old_fname) + Path(old_pth).unlink(missing_ok=True) + + # update the best model + self.best_epoch_idx = test_metrics['epoch_idx'] + self.best_roc_auc = test_metrics['roc_auc'] + save_fname = 'Clone_best_epoch{}_roc_auc{}.pth'.format( + self.best_epoch_idx, self.best_roc_auc) + save_pth = os.path.join(self.output_dir, save_fname) + torch.save(net['model'].state_dict(), save_pth) + if test_metrics['epoch_idx']
== self.config['last_checkpoint']: + torch.save( + net['model'].state_dict(), + '{}/Cloner_{}_epoch_{}.pth'.format(self.config['output_dir'], + self.config.normal_class, + test_metrics['epoch_idx'])) + + def summary(self): + print('Training Completed! ' + 'Best Roc_auc: {:.2f}%,' + 'at epoch {:d}'.format(100 * self.best_roc_auc, + self.best_epoch_idx), + flush=True) diff --git a/OpenOOD/openood/recorders/opengan_recorder.py b/OpenOOD/openood/recorders/opengan_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..66f99bdf8bd0181d524add140a10d89cb91428ae --- /dev/null +++ b/OpenOOD/openood/recorders/opengan_recorder.py @@ -0,0 +1,59 @@ +import copy +import os +import time + +import torch + +from .base_recorder import BaseRecorder + + +class OpenGanRecorder(BaseRecorder): + def __init__(self, config) -> None: + super().__init__(config) + self.save_dir = self.config.output_dir + self.best_val_auroc = 0 + self.best_epoch_idx = 0 + + def report(self, train_metrics, val_metrics): + print('Epoch [{:03d}/{:03d}] | Time {:5d}s | Loss_G: {:.4f} | ' + 'Loss_D: {:.4f} | Val AUROC: {:.2f}\n'.format( + train_metrics['epoch_idx'], self.config.optimizer.num_epochs, + int(time.time() - self.begin_time), + train_metrics['G_losses'][-1], train_metrics['D_losses'][-1], + val_metrics['auroc']), + flush=True) + + def save_model(self, net, val_metrics): + netG = net['netG'] + netD = net['netD'] + epoch_idx = val_metrics['epoch_idx'] + + try: + netG_wts = copy.deepcopy(netG.module.state_dict()) + netD_wts = copy.deepcopy(netD.module.state_dict()) + except AttributeError: + netG_wts = copy.deepcopy(netG.state_dict()) + netD_wts = copy.deepcopy(netD.state_dict()) + + if self.config.recorder.save_all_models: + save_pth = os.path.join(self.save_dir, + 'epoch-{}_GNet.ckpt'.format(epoch_idx)) + torch.save(netG_wts, save_pth) + save_pth = os.path.join(self.save_dir, + 'epoch-{}_DNet.ckpt'.format(epoch_idx)) + torch.save(netD_wts, save_pth) + + if val_metrics['auroc'] >= self.best_val_auroc: + self.best_epoch_idx = epoch_idx + self.best_val_auroc = val_metrics['auroc'] + + torch.save(netG_wts, os.path.join(self.output_dir, + 'best_GNet.ckpt')) + torch.save(netD_wts, os.path.join(self.output_dir, + 'best_DNet.ckpt')) + + def summary(self): + print('Training Completed! 
' + 'Best val AUROC on netD: {:.6f} ' + 'at epoch {:d}'.format(self.best_val_auroc, self.best_epoch_idx), + flush=True) diff --git a/OpenOOD/openood/recorders/rd4ad_recorder.py b/OpenOOD/openood/recorders/rd4ad_recorder.py new file mode 100644 index 0000000000000000000000000000000000000000..5666134eb41575202fd3705ead3cf438c750bb08 --- /dev/null +++ b/OpenOOD/openood/recorders/rd4ad_recorder.py @@ -0,0 +1,71 @@ +import os +import time +from pathlib import Path + +import torch + +from .base_recorder import BaseRecorder + + +class Rd4adRecorder(BaseRecorder): + def __init__(self, config) -> None: + super(Rd4adRecorder, self).__init__(config) + + self.best_epoch_idx = 0 + self.best_result = 0 + + self.begin_time = time.time() + + def report(self, train_metrics, test_metrics): + print('Epoch {:03d} | Time {:5d}s | Train Loss {:.4f} | ' + 'Auroc {:.4f}\n'.format(train_metrics['epoch_idx'], + int(time.time() - self.begin_time), + train_metrics['loss'], + 100.0 * test_metrics['image_auroc']), + flush=True) + + def save_model(self, net, test_metrics): + if self.config.recorder.save_all_models: + torch.save( + { + 'bn': net['bn'].state_dict(), + 'decoder': net['decoder'].state_dict() + }, + os.path.join( + self.output_dir, + 'model_epoch{}.ckpt'.format(test_metrics['epoch_idx']))) + + # enter only if a better AUROC occurs + if test_metrics['image_auroc'] >= self.best_result: + + # delete the previous best checkpoints + old_fname1 = 'bn_best_epoch{}_auroc{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_result) + old_fname2 = 'decoder_best_epoch{}_auroc{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_result) + + old_pth1 = os.path.join(self.output_dir, old_fname1) + old_pth2 = os.path.join(self.output_dir, old_fname2) + Path(old_pth1).unlink(missing_ok=True) + Path(old_pth2).unlink(missing_ok=True) + # update the best model + self.best_epoch_idx = test_metrics['epoch_idx'] + self.best_result = test_metrics['image_auroc'] + torch.save({'bn': net['bn'].state_dict()}, + os.path.join(self.output_dir, 'bn_best.ckpt')) + torch.save({'decoder': net['decoder'].state_dict()}, + os.path.join(self.output_dir, 'decoder_best.ckpt')) + save_fname1 = 'bn_best_epoch{}_auroc{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_result) + save_pth1 = os.path.join(self.output_dir, save_fname1) + save_fname2 = 'decoder_best_epoch{}_auroc{:.4f}.ckpt'.format( + self.best_epoch_idx, self.best_result) + save_pth2 = os.path.join(self.output_dir, save_fname2) + torch.save({'bn': net['bn'].state_dict()}, save_pth1) + torch.save({'decoder': net['decoder'].state_dict()}, save_pth2) + + def summary(self): + print('Training Completed!\n ' + 'Best Auroc: {:.4f} at epoch {:d}\n'.format( + 100.0 * self.best_result, self.best_epoch_idx), + flush=True) diff --git a/OpenOOD/openood/recorders/utils.py b/OpenOOD/openood/recorders/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..25d995931a504f7f98678526be572d2a485cf690 --- /dev/null +++ b/OpenOOD/openood/recorders/utils.py @@ -0,0 +1,30 @@ +from openood.utils import Config + +from .ad_recorder import ADRecorder +from .arpl_recorder import ARPLRecorder +from .base_recorder import BaseRecorder +from .cider_recorder import CiderRecorder +from .cutpaste_recorder import CutpasteRecorder +from .draem_recorder import DRAEMRecorder +from .dsvdd_recorder import DCAERecorder, DSVDDRecorder +from .kdad_recorder import KdadRecorder +from .opengan_recorder import OpenGanRecorder +from .rd4ad_recorder import Rd4adRecorder + + +def get_recorder(config: Config): + recorders =
{ + 'base': BaseRecorder, + 'cider': CiderRecorder, + 'draem': DRAEMRecorder, + 'opengan': OpenGanRecorder, + 'dcae': DCAERecorder, + 'dsvdd': DSVDDRecorder, + 'kdad': KdadRecorder, + 'arpl': ARPLRecorder, + 'cutpaste': CutpasteRecorder, + 'ad': ADRecorder, + 'rd4ad': Rd4adRecorder, + } + + return recorders[config.recorder.name](config) diff --git a/OpenOOD/openood/trainers/__init__.py b/OpenOOD/openood/trainers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..af35089fd9f53f1dfb9828222c6c5ac6dc0fd77d --- /dev/null +++ b/OpenOOD/openood/trainers/__init__.py @@ -0,0 +1 @@ +from .utils import get_trainer
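For reference, a minimal sketch of how these recorder factories are typically driven from a training loop (an editor's illustration; `trainer`, `evaluator`, `net`, and `val_loader` are hypothetical stand-ins, and only the recorder API shown comes from the files above):

recorder = get_recorder(config)  # e.g. config.recorder.name == 'base'
for epoch_idx in range(1, config.optimizer.num_epochs + 1):
    train_metrics = trainer.train_epoch(epoch_idx)       # hypothetical trainer
    val_metrics = evaluator.eval_acc(net, val_loader)    # hypothetical evaluator
    recorder.save_model(net, val_metrics)   # keeps best.ckpt up to date
    recorder.report(train_metrics, val_metrics)
recorder.summary()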
diff --git
a/OpenOOD/openood/trainers/__pycache__/oe_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/oe_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc10aa14e4b823332afdf74c62a356648b5f09e6 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/oe_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/opengan_trainer.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/opengan_trainer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7035ab79ac533b6053f7ccecc1cd1ca030b66f3e Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/opengan_trainer.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/opengan_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/opengan_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbcaa60325cb90ec9f4b4fd54da843df64e7555c Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/opengan_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/rd4ad_trainer.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/rd4ad_trainer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1ac7735db817268054ba1e0caae1c7b4f5d1426 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/rd4ad_trainer.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/rd4ad_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/rd4ad_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a660054dde20a32664fcc4365a4fda0d01e406a9 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/rd4ad_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/regmixup_trainer.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/regmixup_trainer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d51e93ab256fd37e696511b7bcea16cf6be3df94 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/regmixup_trainer.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/regmixup_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/regmixup_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92be922315eab1281071f593fdc3166316858b88 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/regmixup_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/rotpred_trainer.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/rotpred_trainer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f263e6c2e16a9ef1d4f38faf0d663f1025d6587 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/rotpred_trainer.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/rotpred_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/rotpred_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2abe12de1bfe48910d269ae0aeb4232a3ad30c6c Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/rotpred_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/rts_trainer.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/rts_trainer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c5b9178ae977e8ca4686c8f9c48230fe6406482 Binary files /dev/null and 
b/OpenOOD/openood/trainers/__pycache__/rts_trainer.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/rts_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/rts_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab9ae05e81f655d45d23fd59ec8e56b01e0893d9 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/rts_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/sae_trainer.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/sae_trainer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a4fcf8a55334a288c3195eb2ae5346343ce38e4 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/sae_trainer.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/sae_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/sae_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0127bfc05af03ed3f4df08403fec008ba61b42b0 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/sae_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/udg_trainer.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/udg_trainer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3978c42bb7665b1464a66c03d3dda913fd4177ea Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/udg_trainer.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/udg_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/udg_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d3a018170e63b9dc9b945f6c7d134c3c18dd4d8 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/udg_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/utils.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0fd379e32ca13778287880b5c71b98a8445cf05 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/utils.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/utils.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63a561927ccd311ce522828fe65a5034c7284f75 Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/utils.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/vos_trainer.cpython-311.pyc b/OpenOOD/openood/trainers/__pycache__/vos_trainer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0bf51de9968272b1af423efffc12e2ebd675d4f Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/vos_trainer.cpython-311.pyc differ diff --git a/OpenOOD/openood/trainers/__pycache__/vos_trainer.cpython-37.pyc b/OpenOOD/openood/trainers/__pycache__/vos_trainer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47cdb330a96add4816271a73962e1241427f36bf Binary files /dev/null and b/OpenOOD/openood/trainers/__pycache__/vos_trainer.cpython-37.pyc differ diff --git a/OpenOOD/openood/trainers/arpl_gan_trainer.py b/OpenOOD/openood/trainers/arpl_gan_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..c921b7c14b82625142ef8f02716ce76b66f9d953 --- /dev/null +++ b/OpenOOD/openood/trainers/arpl_gan_trainer.py @@ -0,0 +1,164 @@ +import torch +import 
torch.nn as nn +from torch.autograd import Variable +from torch.utils.data import DataLoader +from tqdm import tqdm + +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + +class ARPLGANTrainer: + def __init__(self, net: dict, train_loader: DataLoader, + config: Config) -> None: + + self.net = net['netF'] + self.netG = net['netG'] + self.netD = net['netD'] + self.train_loader = train_loader + self.config = config + self.criterion = net['criterion'] + + self.fixed_noise = torch.FloatTensor(64, config.network.nz, 1, + 1).normal_(0, 1).cuda() + self.criterionD = nn.BCELoss() + + params_list = [{ + 'params': self.net.parameters() + }, { + 'params': self.criterion.parameters() + }] + + self.optimizer = torch.optim.SGD( + params_list, + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + self.optimizerD = torch.optim.Adam(self.netD.parameters(), + lr=config.optimizer.gan_lr, + betas=(0.5, 0.999)) + self.optimizerG = torch.optim.Adam(self.netG.parameters(), + lr=config.optimizer.gan_lr, + betas=(0.5, 0.999)) + + def train_epoch(self, epoch_idx): + self.net.train() + self.netD.train() + self.netG.train() + + loss_avg, lossG_avg, lossD_avg = 0.0, 0.0, 0.0 + train_dataiter = iter(self.train_loader) + + real_label, fake_label = 1, 0 + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True): + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + gan_target = torch.FloatTensor(target.size()).fill_(0).cuda() + + noise = torch.FloatTensor( + data.size(0), self.config.network.nz, self.config.network.ns, + self.config.network.ns).normal_(0, 1).cuda() + noise = noise.cuda() + noise = Variable(noise) + fake = self.netG(noise) + + ########################### + # (1) Update D network # + ########################### + # train with real + gan_target.fill_(real_label) + targetv = Variable(gan_target) + self.optimizerD.zero_grad() + output = self.netD(data) + errD_real = self.criterionD(output, targetv) + errD_real.backward() + + # train with fake + targetv = Variable(gan_target.fill_(fake_label)) + output = self.netD(fake.detach()) + errD_fake = self.criterionD(output, targetv) + errD_fake.backward() + errD = errD_real + errD_fake + self.optimizerD.step() + + ########################### + # (2) Update G network # + ########################### + self.optimizerG.zero_grad() + # Original GAN loss + targetv = Variable(gan_target.fill_(real_label)) + output = self.netD(fake) + errG = self.criterionD(output, targetv) + + # minimize the true distribution + _, feat = self.net( + fake, True, + 1 * torch.ones(data.shape[0], dtype=torch.long).cuda()) + errG_F = self.criterion.fake_loss(feat).mean() + generator_loss = errG + self.config.loss.beta * errG_F + generator_loss.backward() + self.optimizerG.step() + + ########################### + # (3) Update classifier # + ########################### + # cross entropy loss + self.optimizer.zero_grad() + _, feat = self.net( + data, True, + 0 * torch.ones(data.shape[0], dtype=torch.long).cuda()) + _, loss = self.criterion(feat, target) + + # KL divergence + noise = torch.FloatTensor( + data.size(0), self.config.network.nz, self.config.network.ns, 
+ self.config.network.ns).normal_(0, 1).cuda() + noise = Variable(noise) + fake = self.netG(noise) + _, feat = self.net( + fake, True, + 1 * torch.ones(data.shape[0], dtype=torch.long).cuda()) + F_loss_fake = self.criterion.fake_loss(feat).mean() + total_loss = loss + self.config.loss.beta * F_loss_fake + total_loss.backward() + self.optimizer.step() + + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(total_loss) * 0.2 + lossG_avg = lossG_avg * 0.8 + float(generator_loss) * 0.2 + lossD_avg = lossD_avg * 0.8 + float(errD) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg + metrics['lossG'] = lossG_avg + metrics['lossD'] = lossD_avg + + return { + 'netG': self.netG, + 'netD': self.netD, + 'netF': self.net, + 'criterion': self.criterion + }, metrics diff --git a/OpenOOD/openood/trainers/arpl_trainer.py b/OpenOOD/openood/trainers/arpl_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..5cd320a07fa07c79553683b435c06b857563a23e --- /dev/null +++ b/OpenOOD/openood/trainers/arpl_trainer.py @@ -0,0 +1,79 @@ +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + +class ARPLTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net['netF'] + self.train_loader = train_loader + self.config = config + self.criterion = net['criterion'] + + params_list = [{ + 'params': self.net.parameters() + }, { + 'params': self.criterion.parameters() + }] + + self.optimizer = torch.optim.SGD( + params_list, + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + _, feat = self.net(data, return_feature=True) + logits, loss = self.criterion(feat, target) + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg + + return {'netF': self.net, 'criterion': self.criterion}, metrics diff --git a/OpenOOD/openood/trainers/augmix_trainer.py b/OpenOOD/openood/trainers/augmix_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1b822f0b52c3558e25987de2e0202230803c3d --- /dev/null +++ b/OpenOOD/openood/trainers/augmix_trainer.py @@ -0,0 +1,112 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .lr_scheduler import 
cosine_annealing + + +class AugMixTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + self.lam = config.trainer.trainer_args.lam + self.jsd = config.trainer.trainer_args.jsd + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + target = batch['label'].cuda() + + if self.jsd: + orig_data = batch['data'].cuda() + aug1_data = batch['data_aug1'].cuda() + aug2_data = batch['data_aug2'].cuda() + data = torch.cat([orig_data, aug1_data, aug2_data]) + + # forward + logits_all = self.net(data) + logits_clean, logits_aug1, logits_aug2 = torch.split( + logits_all, orig_data.size(0)) + + # Cross-entropy is only computed on clean images + loss = F.cross_entropy(logits_clean, target) + + p_clean, p_aug1, p_aug2 = \ + F.softmax(logits_clean, dim=1), \ + F.softmax(logits_aug1, dim=1), \ + F.softmax(logits_aug2, dim=1) + + # Clamp mixture distribution to avoid exploding KL divergence + p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, + 1).log() + loss += self.lam * ( + F.kl_div(p_mixture, p_clean, reduction='batchmean') + + F.kl_div(p_mixture, p_aug1, reduction='batchmean') + + F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3. 
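The statement above is AugMix's Jensen-Shannon consistency term: the softmax predictions for the clean image and its two augmented views are each pulled toward their mixture distribution via KL divergence, with the mixture kept in log space (and clamped) for numerical stability. A minimal standalone sketch of the same computation follows; the tensor shapes and the default weight lam=12.0 are illustrative assumptions (in this trainer the weight is read from config.trainer.trainer_args.lam):

import torch
import torch.nn.functional as F

def jsd_consistency(logits_clean, logits_aug1, logits_aug2, lam=12.0):
    # Softmax each view; shapes assumed [batch, num_classes].
    p_clean = F.softmax(logits_clean, dim=1)
    p_aug1 = F.softmax(logits_aug1, dim=1)
    p_aug2 = F.softmax(logits_aug2, dim=1)
    # Mixture of the three distributions; clamp before log so KL stays finite.
    p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
    # F.kl_div expects log-probabilities as its first argument.
    return lam * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
                  F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
                  F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.

# usage sketch: loss = F.cross_entropy(logits_clean, target) + \
#     jsd_consistency(logits_clean, logits_aug1, logits_aug2)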
+ else: + data = batch['data'].cuda() + + # forward + logits = self.net(data) + loss = F.cross_entropy(logits, target) + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + # comm.synchronize() + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics + + def save_metrics(self, loss_avg): + all_loss = comm.gather(loss_avg) + total_losses_reduced = np.mean([x for x in all_loss]) + + return total_losses_reduced diff --git a/OpenOOD/openood/trainers/base_trainer.py b/OpenOOD/openood/trainers/base_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..b04d08f8e36e9e71bf22cf522ced14593fd72696 --- /dev/null +++ b/OpenOOD/openood/trainers/base_trainer.py @@ -0,0 +1,82 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + +class BaseTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + logits_classifier = self.net(data) + loss = F.cross_entropy(logits_classifier, target) + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + # comm.synchronize() + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics + + def save_metrics(self, loss_avg): + all_loss = comm.gather(loss_avg) + total_losses_reduced = np.mean([x for x in all_loss]) + + return total_losses_reduced diff --git a/OpenOOD/openood/trainers/cider_trainer.py b/OpenOOD/openood/trainers/cider_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..148e0c55f24147dc68458c80d82870a9da667918 --- /dev/null +++ b/OpenOOD/openood/trainers/cider_trainer.py @@ -0,0 +1,258 @@ +import math +import time +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + + +class CIDERTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + val_loader: DataLoader, config: Config) -> None: + + self.net 
= net + self.train_loader = train_loader + self.config = config + + if 'imagenet' in self.config.dataset.name: + try: + for name, p in self.net.backbone.named_parameters(): + if not name.startswith('layer4'): + p.requires_grad = False + except AttributeError: + for name, p in self.net.module.backbone.named_parameters(): + if not name.startswith('layer4'): + p.requires_grad = False + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + if config.dataset.train.batch_size \ + * config.num_gpus * config.num_machines > 256: + config.optimizer.warm = True + + if config.optimizer.warm: + self.warmup_from = 0.001 + self.warm_epochs = 10 + if config.optimizer.cosine: + eta_min = config.optimizer.lr * \ + (config.optimizer.lr_decay_rate**3) + self.warmup_to = eta_min + (config.optimizer.lr - eta_min) * ( + 1 + math.cos(math.pi * self.warm_epochs / + config.optimizer.num_epochs)) / 2 + else: + self.warmup_to = config.optimizer.lr + + self.criterion_comp = CompLoss( + config.dataset.num_classes, + temperature=config.trainer.trainer_args.temp).cuda() + # V2: EMA style prototypes + self.criterion_dis = DisLoss( + config.dataset.num_classes, + config.network.feat_dim, + config.trainer.trainer_args.proto_m, + self.net, + val_loader, + temperature=config.trainer.trainer_args.temp).cuda() + + def train_epoch(self, epoch_idx): + adjust_learning_rate(self.config, self.optimizer, epoch_idx - 1) + + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + warmup_learning_rate(self.config, self.warm_epochs, + self.warmup_from, + self.warmup_to, epoch_idx - 1, train_step, + len(train_dataiter), self.optimizer) + + batch = next(train_dataiter) + data = batch['data'] + target = batch['label'] + + data = torch.cat([data[0], data[1]], dim=0).cuda() + target = target.repeat(2).cuda() + + # forward + features = self.net(data) + dis_loss = self.criterion_dis(features, target) # V2: EMA style + comp_loss = self.criterion_comp(features, + self.criterion_dis.prototypes, + target) + loss = self.config.trainer.trainer_args.w * comp_loss + dis_loss + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + # comm.synchronize() + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics + + def save_metrics(self, loss_avg): + all_loss = comm.gather(loss_avg) + total_losses_reduced = np.mean([x for x in all_loss]) + + return total_losses_reduced + + +def adjust_learning_rate(config, optimizer, epoch): + lr = config.optimizer.lr + if config.optimizer.cosine: + eta_min = lr * (config.optimizer.lr_decay_rate**3) + lr = eta_min + (lr - eta_min) * ( + 1 + math.cos(math.pi * epoch / config.optimizer.num_epochs)) / 2 + else: + steps = np.sum(epoch > np.asarray(config.optimizer.lr_decay_epochs)) + if steps > 0: + lr = lr * (config.optimizer.lr_decay_rate**steps) + + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def warmup_learning_rate(config, warm_epochs, warmup_from, warmup_to, epoch, + batch_id, total_batches, optimizer): + if config.optimizer.warm and epoch 
<= warm_epochs: + p = (batch_id + (epoch - 1) * total_batches) / \ + (warm_epochs * total_batches) + lr = warmup_from + p * (warmup_to - warmup_from) + + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +class CompLoss(nn.Module): + """Compactness Loss with class-conditional prototypes.""" + def __init__(self, n_cls, temperature=0.07, base_temperature=0.07): + super(CompLoss, self).__init__() + self.n_cls = n_cls + self.temperature = temperature + self.base_temperature = base_temperature + + def forward(self, features, prototypes, labels): + prototypes = F.normalize(prototypes, dim=1) + proxy_labels = torch.arange(0, self.n_cls).cuda() + labels = labels.contiguous().view(-1, 1) + mask = torch.eq(labels, proxy_labels.T).float().cuda() # bz, cls + + # compute logits + feat_dot_prototype = torch.div(torch.matmul(features, prototypes.T), + self.temperature) + # for numerical stability + logits_max, _ = torch.max(feat_dot_prototype, dim=1, keepdim=True) + logits = feat_dot_prototype - logits_max.detach() + + # compute log_prob + exp_logits = torch.exp(logits) + log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True)) + + # compute mean of log-likelihood over positive + mean_log_prob_pos = (mask * log_prob).sum(1) + + # loss + loss = -(self.temperature / + self.base_temperature) * mean_log_prob_pos.mean() + return loss + + +class DisLoss(nn.Module): + """Dispersion Loss with EMA prototypes.""" + def __init__(self, + n_cls, + feat_dim, + proto_m, + model, + loader, + temperature=0.1, + base_temperature=0.1): + super(DisLoss, self).__init__() + self.n_cls = n_cls + self.feat_dim = feat_dim + self.proto_m = proto_m + + self.temperature = temperature + self.base_temperature = base_temperature + self.register_buffer('prototypes', + torch.zeros(self.n_cls, self.feat_dim)) + self.model = model + self.loader = loader + self.init_class_prototypes() + + def forward(self, features, labels): + prototypes = self.prototypes + num_cls = self.n_cls + for j in range(len(features)): + prototypes[labels[j].item()] = F.normalize( + prototypes[labels[j].item()] * self.proto_m + features[j] * + (1 - self.proto_m), + dim=0) + self.prototypes = prototypes.detach() + labels = torch.arange(0, num_cls).cuda() + labels = labels.contiguous().view(-1, 1) + + mask = (1 - torch.eq(labels, labels.T).float()).cuda() + + logits = torch.div(torch.matmul(prototypes, prototypes.T), + self.temperature) + + logits_mask = torch.scatter(torch.ones_like(mask), 1, + torch.arange(num_cls).view(-1, 1).cuda(), + 0) + mask = mask * logits_mask + mean_prob_neg = torch.log( + (mask * torch.exp(logits)).sum(1) / mask.sum(1)) + mean_prob_neg = mean_prob_neg[~torch.isnan(mean_prob_neg)] + loss = self.temperature / self.base_temperature * mean_prob_neg.mean() + return loss + + def init_class_prototypes(self): + """Initialize class prototypes.""" + self.model.eval() + start = time.time() + prototype_counts = [0] * self.n_cls + with torch.no_grad(): + prototypes = torch.zeros(self.n_cls, self.feat_dim).cuda() + for i, batch in enumerate(self.loader): + input = batch['data'] + target = batch['label'] + input, target = input.cuda(), target.cuda() + features = self.model(input) + for j, feature in enumerate(features): + prototypes[target[j].item()] += feature + prototype_counts[target[j].item()] += 1 + for cls in range(self.n_cls): + prototypes[cls] /= prototype_counts[cls] + # measure elapsed time + duration = time.time() - start + print(f'Time to initialize prototypes:
{duration:.3f}') + prototypes = F.normalize(prototypes, dim=1) + self.prototypes = prototypes diff --git a/OpenOOD/openood/trainers/conf_branch_trainer.py b/OpenOOD/openood/trainers/conf_branch_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..09400186ff7076bfb3972ca6c203a9af91e1b174 --- /dev/null +++ b/OpenOOD/openood/trainers/conf_branch_trainer.py @@ -0,0 +1,114 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config +from .lr_scheduler import cosine_annealing + + +class ConfBranchTrainer: + def __init__(self, net, train_loader, config: Config) -> None: + self.train_loader = train_loader + self.config = config + self.net = net + self.prediction_criterion = nn.NLLLoss().cuda() + self.optimizer = torch.optim.SGD( + net.parameters(), + lr=config.optimizer['lr'], + momentum=config.optimizer['momentum'], + nesterov=config.optimizer['nesterov'], + weight_decay=config.optimizer['weight_decay']) + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + self.lmbda = self.config.trainer['lmbda'] + + def train_epoch(self, epoch_idx): + self.net.train() + correct_count = 0. + total = 0. + accuracy = 0. + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}'.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + images = Variable(batch['data']).cuda() + labels = Variable(batch['label']).cuda() + labels_onehot = Variable( + encode_onehot(labels, self.config.num_classes)) + self.net.zero_grad() + + pred_original, confidence = self.net(images, + return_confidence=True) + pred_original = F.softmax(pred_original, dim=-1) + confidence = torch.sigmoid(confidence) + eps = self.config.trainer['eps'] + pred_original = torch.clamp(pred_original, 0. + eps, 1. - eps) + confidence = torch.clamp(confidence, 0. + eps, 1. - eps) + + if not self.config.baseline: + # Randomly set half of the confidences to 1 (i.e. 
no hints) + b = Variable( + torch.bernoulli( + torch.Tensor(confidence.size()).uniform_(0, + 1))).cuda() + conf = confidence * b + (1 - b) + pred_new = pred_original * conf.expand_as( + pred_original) + labels_onehot * ( + 1 - conf.expand_as(labels_onehot)) + pred_new = torch.log(pred_new) + else: + pred_new = torch.log(pred_original) + + xentropy_loss = self.prediction_criterion(pred_new, labels) + confidence_loss = torch.mean(-torch.log(confidence)) + + if self.config.baseline: + total_loss = xentropy_loss + else: + total_loss = xentropy_loss + (self.lmbda * confidence_loss) + + if self.config.trainer['budget'] > confidence_loss.item(): + self.lmbda = self.lmbda / 1.01 + elif self.config.trainer['budget'] <= confidence_loss.item(): + self.lmbda = self.lmbda / 0.99 + + total_loss.backward() + self.optimizer.step() + self.scheduler.step() + + pred_idx = torch.max(pred_original.data, 1)[1] + total += labels.size(0) + correct_count += (pred_idx == labels.data).sum() + accuracy = correct_count / total + + metrics = {} + metrics['train_acc'] = accuracy + metrics['loss'] = total_loss + metrics['epoch_idx'] = epoch_idx + return self.net, metrics + + +def encode_onehot(labels, n_classes): + onehot = torch.FloatTensor(labels.size()[0], + n_classes) # batchsize * num of class + labels = labels.data + if labels.is_cuda: + onehot = onehot.cuda() + onehot.zero_() + onehot.scatter_(1, labels.view(-1, 1), 1) + return onehot diff --git a/OpenOOD/openood/trainers/csi_trainer.py b/OpenOOD/openood/trainers/csi_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..714506353c5dd60b1b90ff4965f9b3fefd55e7de --- /dev/null +++ b/OpenOOD/openood/trainers/csi_trainer.py @@ -0,0 +1,912 @@ +import math +import numbers + +import diffdist.functional as distops +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch.optim.lr_scheduler as lr_scheduler +from torch.autograd import Function +from torch.optim.lr_scheduler import ReduceLROnPlateau, _LRScheduler +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + + +class CSITrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + self.net = net['backbone'] + self.train_loader = train_loader + self.config = config + self.mode = config.mode + + if self.config.num_gpus > 1: + self.dummy_net = net['dummy_net'].module + else: + self.dummy_net = net['dummy_net'] + self.dummy_net.cpu() + + self.simclr_aug = get_simclr_augmentation( + config, image_size=config.dataset.image_size).cuda() + self.linear = net['linear'] + self.linear_optim = torch.optim.Adam( + self.linear.parameters(), + lr=1e-3, + betas=(.9, .999), + weight_decay=config.optimizer.weight_decay) + self.criterion = nn.CrossEntropyLoss().cuda() + self.hflip = HorizontalFlipLayer().cuda() + + self.simclr_layer = net['simclr_layer'] + self.rotation_linear = net['shift_cls_layer'] + self.joint_linear = net['joint_distribution_layer'] + + if 'step1' in self.mode: + self.optimizer = optim.SGD( + list(self.net.parameters()) + + list(self.simclr_layer.parameters()), + lr=config.optimizer.lr, + momentum=0.9, + weight_decay=config.optimizer.weight_decay) + self.scheduler = lr_scheduler.CosineAnnealingLR( + self.optimizer, config.optimizer.num_epochs) + self.scheduler_warmup = GradualWarmupScheduler( + self.optimizer, + multiplier=10.0, + 
total_epoch=config.optimizer.warmup, + after_scheduler=self.scheduler) + else: + milestones = [ + int(0.6 * config.optimizer.num_epochs), + int(0.75 * config.optimizer.num_epochs), + int(0.9 * config.optimizer.num_epochs) + ] + + self.linear_optim = torch.optim.Adam( + self.linear.parameters(), + lr=1e-3, + betas=(.9, .999), + weight_decay=config.optimizer.weight_decay) + self.linear_scheduler = lr_scheduler.MultiStepLR( + self.linear_optim, gamma=0.1, milestones=milestones) + + self.rotation_linear_optim = torch.optim.SGD( + self.rotation_linear.parameters(), + lr=1e-1, + weight_decay=config.optimizer.weight_decay) + self.rot_scheduler = lr_scheduler.MultiStepLR( + self.rotation_linear_optim, gamma=0.1, milestones=milestones) + + self.joint_linear_optim = torch.optim.SGD( + self.joint_linear.parameters(), + lr=1e-1, + weight_decay=config.optimizer.weight_decay) + self.joint_scheduler = lr_scheduler.MultiStepLR( + self.joint_linear_optim, gamma=0.1, milestones=milestones) + + def train_epoch(self, epoch_idx): + if 'step1' in self.mode: + return self.train_sup_epoch(epoch_idx) + else: + return self.train_suplinear_epoch(epoch_idx) + + def train_sup_epoch(self, epoch_idx): + self.net.train() + train_dataiter = iter(self.train_loader) + + n = 0 + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + images = batch['data'].cuda() + labels = batch['label'].cuda() + + batch_size = images.size(0) + images1, images2 = self.hflip(images.repeat(2, 1, 1, + 1)).chunk(2) # hflip + images1 = torch.cat( + [torch.rot90(images1, rot, (2, 3)) for rot in range(4)]) # 4B + images2 = torch.cat( + [torch.rot90(images2, rot, (2, 3)) for rot in range(4)]) # 4B + images_pair = torch.cat([images1, images2], dim=0) # 8B + + rot_sim_labels = torch.cat([ + labels + self.config.dataset.num_classes * i for i in range(4) + ], + dim=0) + + images_pair = self.simclr_aug(images_pair) # simclr augment + _, features = self.net(images_pair, return_feature=True) + + simclr_outputs = self.simclr_layer(features) + simclr = normalize(simclr_outputs) # normalize + sim_matrix = get_similarity_matrix( + simclr, multi_gpu=self.config.num_gpus > 1) + loss_sim = Supervised_NT_xent( + sim_matrix, + labels=rot_sim_labels, + temperature=self.config.temperature, + multi_gpu=self.config.num_gpus > 1) * self.config.sim_lambda + + # total loss + loss = loss_sim + + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + self.scheduler.step(epoch_idx - 1 + n / len(self.train_loader)) + # lr = self.optimizer.param_groups[0]['lr'] + + # Post-processing stuffs + penul_1 = features[:batch_size] + penul_2 = features[4 * batch_size:5 * batch_size] + features = torch.cat([penul_1, + penul_2]) # only use original rotation + + # Linear evaluation + outputs_linear_eval = self.linear(features.detach()) + loss_linear = self.criterion(outputs_linear_eval, labels.repeat(2)) + + self.linear_optim.zero_grad() + loss_linear.backward() + self.linear_optim.step() + + n = n + 1 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss + + if self.config.num_gpus > 1: + self.dummy_net.backbone.load_state_dict( + self.net.module.state_dict()) + self.dummy_net.linear.load_state_dict( + self.linear.module.state_dict()) + self.dummy_net.simclr_layer.load_state_dict( + self.simclr_layer.module.state_dict()) + self.dummy_net.joint_distribution_layer.load_state_dict( + 
self.joint_linear.module.state_dict()) + self.dummy_net.shift_cls_layer.load_state_dict( + self.rotation_linear.module.state_dict()) + else: + self.dummy_net.backbone.load_state_dict(self.net.state_dict()) + self.dummy_net.linear.load_state_dict(self.linear.state_dict()) + self.dummy_net.simclr_layer.load_state_dict( + self.simclr_layer.state_dict()) + self.dummy_net.joint_distribution_layer.load_state_dict( + self.joint_linear.state_dict()) + self.dummy_net.shift_cls_layer.load_state_dict( + self.rotation_linear.state_dict()) + + return self.dummy_net, metrics + + def train_suplinear_epoch(self, epoch_idx): + self.net.train() + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + self.net.eval() + batch = next(train_dataiter) + images = batch['data'].cuda() + labels = batch['label'].cuda() + + batch_size = images.size(0) + images = self.hflip(images) + images = torch.cat( + [torch.rot90(images, rot, (2, 3)) for rot in range(4)]) # 4B + rot_labels = torch.cat( + [torch.ones_like(labels) * k for k in range(4)], 0) # B -> 4B + joint_labels = torch.cat([ + labels + self.config.dataset.num_classes * i for i in range(4) + ], + dim=0) + + images = self.simclr_aug(images) # simclr augmentation + _, features = self.net(images, return_feature=True) + penultimate = features.detach() + + outputs = self.linear( + penultimate[0:batch_size] + ) # only use 0 degree samples for linear eval + outputs_rot = self.rotation_linear(penultimate) + outputs_joint = self.joint_linear(penultimate) + + loss_ce = self.criterion(outputs, labels) + loss_rot = self.criterion(outputs_rot, rot_labels) + loss_joint = self.criterion(outputs_joint, joint_labels) + + # CE loss + self.linear_optim.zero_grad() + loss_ce.backward() + self.linear_optim.step() + + # Rot loss + self.rotation_linear_optim.zero_grad() + loss_rot.backward() + self.rotation_linear_optim.step() + + # Joint loss + self.joint_linear_optim.zero_grad() + loss_joint.backward() + self.joint_linear_optim.step() + + # optimizer learning rate + # lr = self.linear_optim.param_groups[0]['lr'] + + self.linear_scheduler.step() + self.rot_scheduler.step() + self.joint_scheduler.step() + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_ce + loss_rot + loss_joint + + if self.config.num_gpus > 1: + self.dummy_net.backbone.load_state_dict( + self.net.module.state_dict()) + self.dummy_net.linear.load_state_dict( + self.linear.module.state_dict()) + self.dummy_net.simclr_layer.load_state_dict( + self.simclr_layer.module.state_dict()) + self.dummy_net.joint_distribution_layer.load_state_dict( + self.joint_linear.module.state_dict()) + self.dummy_net.shift_cls_layer.load_state_dict( + self.rotation_linear.module.state_dict()) + else: + self.dummy_net.backbone.load_state_dict(self.net.state_dict()) + self.dummy_net.linear.load_state_dict(self.linear.state_dict()) + self.dummy_net.simclr_layer.load_state_dict( + self.simclr_layer.state_dict()) + self.dummy_net.joint_distribution_layer.load_state_dict( + self.joint_linear.state_dict()) + self.dummy_net.shift_cls_layer.load_state_dict( + self.rotation_linear.state_dict()) + + return self.dummy_net, metrics + + +def get_similarity_matrix(outputs, chunk=2, multi_gpu=False): + """Compute similarity matrix.
+ + - outputs: (B', d) tensor for B' = B * chunk + - sim_matrix: (B', B') tensor + """ + + if multi_gpu: + outputs_gathered = [] + for out in outputs.chunk(chunk): + gather_t = [ + torch.empty_like(out) for _ in range(dist.get_world_size()) + ] + gather_t = torch.cat(distops.all_gather(gather_t, out)) + outputs_gathered.append(gather_t) + outputs = torch.cat(outputs_gathered) + + sim_matrix = torch.mm(outputs, outputs.t()) # (B', d), (d, B') -> (B', B') + + return sim_matrix + + +def Supervised_NT_xent(sim_matrix, + labels, + temperature=0.5, + chunk=2, + eps=1e-8, + multi_gpu=False): + """Compute NT_xent loss. + + - sim_matrix: (B', B') tensor for B' = B * chunk (first 2B are pos samples) + """ + + device = sim_matrix.device + + if multi_gpu: + gather_t = [ + torch.empty_like(labels) for _ in range(dist.get_world_size()) + ] + labels = torch.cat(distops.all_gather(gather_t, labels)) + labels = labels.repeat(2) + + logits_max, _ = torch.max(sim_matrix, dim=1, keepdim=True) + sim_matrix = sim_matrix - logits_max.detach() + + B = sim_matrix.size(0) // chunk # B = B' / chunk + + eye = torch.eye(B * chunk).to(device) # (B', B') + sim_matrix = torch.exp(sim_matrix / temperature) * (1 - eye + ) # remove diagonal + + denom = torch.sum(sim_matrix, dim=1, keepdim=True) + sim_matrix = -torch.log(sim_matrix / (denom + eps) + eps) # loss matrix + + labels = labels.contiguous().view(-1, 1) + Mask = torch.eq(labels, labels.t()).float().to(device) + # Mask = eye * torch.stack([labels == labels[i] + # for i in range(labels.size(0))]).float().to(device) + Mask = Mask / (Mask.sum(dim=1, keepdim=True) + eps) + + loss = torch.sum(Mask * sim_matrix) / (2 * B) + + return loss + + +def normalize(x, dim=1, eps=1e-8): + return x / (x.norm(dim=dim, keepdim=True) + eps) + + +def get_simclr_augmentation(config, image_size): + + # parameter for resizecrop + resize_scale = (config.resize_factor, 1.0) # resize scaling factor + if config.resize_fix: # if resize_fix is True, use same scale + resize_scale = (config.resize_factor, config.resize_factor) + + # Align augmentation + color_jitter = ColorJitterLayer(brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.1, + p=0.8) + color_gray = RandomColorGrayLayer(p=0.2) + resize_crop = RandomResizedCropLayer(scale=resize_scale, size=image_size) + + # Transform define # + if config.dataset.name == 'imagenet': + # Using RandomResizedCrop at PIL transform + transform = nn.Sequential( + color_jitter, + color_gray, + ) + else: + transform = nn.Sequential( + color_jitter, + color_gray, + resize_crop, + ) + + return transform + + +# ----------Warmup Scheduler---------- +class GradualWarmupScheduler(_LRScheduler): + """Gradually warm-up(increasing) learning rate in optimizer. Proposed in + 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'. + + Args: + optimizer (Optimizer): Wrapped optimizer. + multiplier: target learning rate = base lr * multiplier + if multiplier > 1.0. if multiplier = 1.0, + lr starts from 0 and ends up with the base_lr. + total_epoch: target learning rate is reached at total_epoch, gradually + after_scheduler: after target_epoch, + use this scheduler (eg. 
ReduceLROnPlateau) + """ + def __init__(self, + optimizer, + multiplier, + total_epoch, + after_scheduler=None): + self.multiplier = multiplier + if self.multiplier < 1.: + raise ValueError( + 'multiplier should be greater than or equal to 1.') + self.total_epoch = total_epoch + self.after_scheduler = after_scheduler + self.finished = False + super(GradualWarmupScheduler, self).__init__(optimizer) + + def get_lr(self): + if self.last_epoch > self.total_epoch: + if self.after_scheduler: + if not self.finished: + self.after_scheduler.base_lrs = [ + base_lr * self.multiplier for base_lr in self.base_lrs + ] + self.finished = True + return self.after_scheduler.get_lr() + return [base_lr * self.multiplier for base_lr in self.base_lrs] + + if self.multiplier == 1.0: + return [ + base_lr * (float(self.last_epoch) / self.total_epoch) + for base_lr in self.base_lrs + ] + else: + return [ + base_lr * + ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + + 1.) for base_lr in self.base_lrs + ] + + def step_ReduceLROnPlateau(self, metrics, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + self.last_epoch = epoch if epoch != 0 else 1 + # ReduceLROnPlateau is called at the end of epoch, + # whereas others are called at beginning + if self.last_epoch <= self.total_epoch: + warmup_lr = [ + base_lr * + ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + + 1.) for base_lr in self.base_lrs + ] + for param_group, lr in zip(self.optimizer.param_groups, warmup_lr): + param_group['lr'] = lr + else: + if epoch is None: + self.after_scheduler.step(metrics, None) + else: + self.after_scheduler.step(metrics, epoch - self.total_epoch) + + def step(self, epoch=None, metrics=None): + if type(self.after_scheduler) != ReduceLROnPlateau: + if self.finished and self.after_scheduler: + if epoch is None: + self.after_scheduler.step(None) + else: + self.after_scheduler.step(epoch - self.total_epoch) + else: + return super(GradualWarmupScheduler, self).step(epoch) + else: + self.step_ReduceLROnPlateau(metrics, epoch) + + +# ----------transform layers---------- +if torch.__version__ >= '1.4.0': + kwargs = {'align_corners': False} +else: + kwargs = {} + + +def rgb2hsv(rgb): + """Convert a 4-d RGB tensor to the HSV counterpart. + + Here, we compute hue using atan2() based on the definition in [1], + instead of using the common lookup table approach as in [2, 3]. + Those values agree when the angle is a multiple of 30°, + otherwise they may differ at most ~1.2°. + + References + [1] https://en.wikipedia.org/wiki/Hue + [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html + [3] https://github.com/scikit-image/scikit-image/ + blob/master/skimage/color/colorconv.py#L212 + """ + + r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :] + + Cmax = rgb.max(1)[0] + Cmin = rgb.min(1)[0] + delta = Cmax - Cmin + + hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b) + hue = (hue % (2 * math.pi)) / (2 * math.pi) + saturate = delta / Cmax + value = Cmax + hsv = torch.stack([hue, saturate, value], dim=1) + hsv[~torch.isfinite(hsv)] = 0. + return hsv + + +def hsv2rgb(hsv): + """Convert a 4-d HSV tensor to the RGB counterpart. + + >>> %timeit hsv2rgb(hsv) + 2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + >>> %timeit rgb2hsv_fast(rgb) + 298 µs ± 542 ns per loop (mean ± std. dev. 
of 7 runs, 1000 loops each) + >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_fast(hsv), atol=1e-6) + True + + References + [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative + """ + h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]] + c = v * s + + n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1) + k = (n + h * 6) % 6 + t = torch.min(k, 4 - k) + t = torch.clamp(t, 0, 1) + + return v - c * t + + +class RandomResizedCropLayer(nn.Module): + def __init__(self, size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)): + """Inception Crop size (tuple): size of forwarding image (C, W, H) + scale (tuple): range of size of the origin size cropped ratio (tuple): + + range of aspect ratio of the origin aspect ratio cropped. + """ + super(RandomResizedCropLayer, self).__init__() + + _eye = torch.eye(2, 3) + self.size = size + self.register_buffer('_eye', _eye) + self.scale = scale + self.ratio = ratio + + def forward(self, inputs, whbias=None): + _device = inputs.device + N = inputs.size(0) + _theta = self._eye.repeat(N, 1, 1) + + if whbias is None: + whbias = self._sample_latent(inputs) + + _theta[:, 0, 0] = whbias[:, 0] + _theta[:, 1, 1] = whbias[:, 1] + _theta[:, 0, 2] = whbias[:, 2] + _theta[:, 1, 2] = whbias[:, 3] + + grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device) + output = F.grid_sample(inputs, + grid, + padding_mode='reflection', + **kwargs) + + if self.size is not None: + output = F.adaptive_avg_pool2d(output, self.size) + + return output + + def _clamp(self, whbias): + + w = whbias[:, 0] + h = whbias[:, 1] + w_bias = whbias[:, 2] + h_bias = whbias[:, 3] + + # Clamp with scale + w = torch.clamp(w, *self.scale) + h = torch.clamp(h, *self.scale) + + # Clamp with ratio + w = self.ratio[0] * h + torch.relu(w - self.ratio[0] * h) + w = self.ratio[1] * h - torch.relu(self.ratio[1] * h - w) + + # Clamp with bias range: w_bias \in (w - 1, 1 - w), + # h_bias \in (h - 1, 1 - h) + w_bias = w - 1 + torch.relu(w_bias - w + 1) + w_bias = 1 - w - torch.relu(1 - w - w_bias) + + h_bias = h - 1 + torch.relu(h_bias - h + 1) + h_bias = 1 - h - torch.relu(1 - h - h_bias) + + whbias = torch.stack([w, h, w_bias, h_bias], dim=0).t() + + return whbias + + def _sample_latent(self, inputs): + + _device = inputs.device + N, _, width, height = inputs.shape + + # N * 10 trial + area = width * height + target_area = np.random.uniform(*self.scale, N * 10) * area + log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1])) + aspect_ratio = np.exp(np.random.uniform(*log_ratio, N * 10)) + + # If doesn't satisfy ratio condition, then do central crop + w = np.round(np.sqrt(target_area * aspect_ratio)) + h = np.round(np.sqrt(target_area / aspect_ratio)) + cond = (0 < w) * (w <= width) * (0 < h) * (h <= height) + w = w[cond] + h = h[cond] + cond_len = w.shape[0] + if cond_len >= N: + w = w[:N] + h = h[:N] + else: + w = np.concatenate([w, np.ones(N - cond_len) * width]) + h = np.concatenate([h, np.ones(N - cond_len) * height]) + + w_bias = np.random.randint(w - width, width - w + 1) / width + h_bias = np.random.randint(h - height, height - h + 1) / height + w = w / width + h = h / height + + whbias = np.column_stack([w, h, w_bias, h_bias]) + whbias = torch.tensor(whbias, device=_device) + + return whbias + + +class HorizontalFlipRandomCrop(nn.Module): + def __init__(self, max_range): + super(HorizontalFlipRandomCrop, self).__init__() + self.max_range = max_range + _eye = torch.eye(2, 3) + self.register_buffer('_eye', _eye) + + def forward(self, input, sign=None, bias=None, rotation=None): + _device = 
input.device + N = input.size(0) + _theta = self._eye.repeat(N, 1, 1) + + if sign is None: + sign = torch.bernoulli(torch.ones(N, device=_device) * 0.5) * 2 - 1 + if bias is None: + bias = torch.empty( + (N, 2), device=_device).uniform_(-self.max_range, + self.max_range) + _theta[:, 0, 0] = sign + _theta[:, :, 2] = bias + + if rotation is not None: + _theta[:, 0:2, 0:2] = rotation + + grid = F.affine_grid(_theta, input.size(), **kwargs).to(_device) + output = F.grid_sample(input, + grid, + padding_mode='reflection', + **kwargs) + + return output + + def _sample_latent(self, N, device=None): + sign = torch.bernoulli(torch.ones(N, device=device) * 0.5) * 2 - 1 + bias = torch.empty( + (N, 2), device=device).uniform_(-self.max_range, self.max_range) + return sign, bias + + +class Rotation(nn.Module): + def __init__(self, max_range=4): + super(Rotation, self).__init__() + self.max_range = max_range + self.prob = 0.5 + + def forward(self, input, aug_index=None): + + _, _, H, W = input.size() + + if aug_index is None: + aug_index = np.random.randint(4) + + output = torch.rot90(input, aug_index, (2, 3)) + + _prob = input.new_full((input.size(0), ), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + output = _mask * input + (1 - _mask) * output + + else: + aug_index = aug_index % self.max_range + output = torch.rot90(input, aug_index, (2, 3)) + + return output + + +class CutPerm(nn.Module): + def __init__(self, max_range=4): + super(CutPerm, self).__init__() + self.max_range = max_range + self.prob = 0.5 + + def forward(self, input, aug_index=None): + + _, _, H, W = input.size() + + if aug_index is None: + aug_index = np.random.randint(4) + + output = self._cutperm(input, aug_index) + + _prob = input.new_full((input.size(0), ), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + output = _mask * input + (1 - _mask) * output + + else: + aug_index = aug_index % self.max_range + output = self._cutperm(input, aug_index) + + return output + + def _cutperm(self, inputs, aug_index): + + _, _, H, W = inputs.size() + h_mid = int(H / 2) + w_mid = int(W / 2) + + jigsaw_h = aug_index // 2 + jigsaw_v = aug_index % 2 + + if jigsaw_h == 1: + inputs = torch.cat( + (inputs[:, :, h_mid:, :], inputs[:, :, 0:h_mid, :]), dim=2) + if jigsaw_v == 1: + inputs = torch.cat( + (inputs[:, :, :, w_mid:], inputs[:, :, :, 0:w_mid]), dim=3) + + return inputs + + +class HorizontalFlipLayer(nn.Module): + def __init__(self): + """ + img_size : (int, int, int) + Height and width must be powers of 2. E.g. (32, 32, 1) or + (64, 128, 3). Last number indicates number of channels, e.g. 
1 for + grayscale or 3 for RGB + """ + super(HorizontalFlipLayer, self).__init__() + + _eye = torch.eye(2, 3) + self.register_buffer('_eye', _eye) + + def forward(self, inputs): + _device = inputs.device + + N = inputs.size(0) + _theta = self._eye.repeat(N, 1, 1) + r_sign = torch.bernoulli(torch.ones(N, device=_device) * 0.5) * 2 - 1 + _theta[:, 0, 0] = r_sign + grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device) + inputs = F.grid_sample(inputs, + grid, + padding_mode='reflection', + **kwargs) + + return inputs + + +class RandomColorGrayLayer(nn.Module): + def __init__(self, p): + super(RandomColorGrayLayer, self).__init__() + self.prob = p + + _weight = torch.tensor([[0.299, 0.587, 0.114]]) + self.register_buffer('_weight', _weight.view(1, 3, 1, 1)) + + def forward(self, inputs, aug_index=None): + + if aug_index == 0: + return inputs + + outputs = F.conv2d(inputs, self._weight) + gray = torch.cat([outputs, outputs, outputs], dim=1) + + if aug_index is None: + _prob = inputs.new_full((inputs.size(0), ), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + + gray = inputs * (1 - _mask) + gray * _mask + + return gray + + +class ColorJitterLayer(nn.Module): + def __init__(self, p, brightness, contrast, saturation, hue): + super(ColorJitterLayer, self).__init__() + self.prob = p + self.brightness = self._check_input(brightness, 'brightness') + self.contrast = self._check_input(contrast, 'contrast') + self.saturation = self._check_input(saturation, 'saturation') + self.hue = self._check_input(hue, + 'hue', + center=0, + bound=(-0.5, 0.5), + clip_first_on_zero=False) + + def _check_input(self, + value, + name, + center=1, + bound=(0, float('inf')), + clip_first_on_zero=True): + if isinstance(value, numbers.Number): + if value < 0: + raise ValueError( + 'If {} is a single number, it must be non negative.'. + format(name)) + value = [center - value, center + value] + if clip_first_on_zero: + value[0] = max(value[0], 0) + elif isinstance(value, (tuple, list)) and len(value) == 2: + if not bound[0] <= value[0] <= value[1] <= bound[1]: + raise ValueError('{} values should be between {}'.format( + name, bound)) + else: + raise TypeError( + '{} should be a single number or a list/tuple with length 2.'. + format(name)) + + # if value is 0 or (1., 1.) for brightness/contrast/saturation + # or (0., 0.) 
for hue, do nothing + if value[0] == value[1] == center: + value = None + return value + + def adjust_contrast(self, x): + if self.contrast: + factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast) + means = torch.mean(x, dim=[2, 3], keepdim=True) + x = (x - means) * factor + means + return torch.clamp(x, 0, 1) + + def adjust_hsv(self, x): + f_h = x.new_zeros(x.size(0), 1, 1) + f_s = x.new_ones(x.size(0), 1, 1) + f_v = x.new_ones(x.size(0), 1, 1) + + if self.hue: + f_h.uniform_(*self.hue) + if self.saturation: + f_s = f_s.uniform_(*self.saturation) + if self.brightness: + f_v = f_v.uniform_(*self.brightness) + + return RandomHSVFunction.apply(x, f_h, f_s, f_v) + + def transform(self, inputs): + # Shuffle transform + if np.random.rand() > 0.5: + transforms = [self.adjust_contrast, self.adjust_hsv] + else: + transforms = [self.adjust_hsv, self.adjust_contrast] + + for t in transforms: + inputs = t(inputs) + + return inputs + + def forward(self, inputs): + _prob = inputs.new_full((inputs.size(0), ), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + return inputs * (1 - _mask) + self.transform(inputs) * _mask + + +class RandomHSVFunction(Function): + @staticmethod + def forward(ctx, x, f_h, f_s, f_v): + # ctx is a context object that can be used to stash information + # for backward computation + x = rgb2hsv(x) + h = x[:, 0, :, :] + h += (f_h * 255. / 360.) + h = (h % 1) + x[:, 0, :, :] = h + x[:, 1, :, :] = x[:, 1, :, :] * f_s + x[:, 2, :, :] = x[:, 2, :, :] * f_v + x = torch.clamp(x, 0, 1) + x = hsv2rgb(x) + return x + + @staticmethod + def backward(ctx, grad_output): + # We return as many input gradients as there were arguments. + # Gradients of non-Tensor arguments to forward must be None. + grad_input = None + if ctx.needs_input_grad[0]: + grad_input = grad_output.clone() + return grad_input, None, None, None + + +class NormalizeLayer(nn.Module): + """In order to certify radii in original coordinates rather than + standardized coordinates, we add the Gaussian noise _before_ standardizing, + which is why we have standardization be the first layer of the classifier + rather than as a part of preprocessing as is typical.""" + def __init__(self): + super(NormalizeLayer, self).__init__() + + def forward(self, inputs): + return (inputs - 0.5) / 0.5 diff --git a/OpenOOD/openood/trainers/cutmix_trainer.py b/OpenOOD/openood/trainers/cutmix_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..71fcecbdfb92e0d436c41794d53154dd1b79bf6c --- /dev/null +++ b/OpenOOD/openood/trainers/cutmix_trainer.py @@ -0,0 +1,114 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + +class CutMixTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + self.args = config.trainer.trainer_args + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + def train_epoch(self, epoch_idx): + 
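+        # [editor's note, illustrative addition, not part of the original
+        # patch] The LambdaLR built in __init__ multiplies the base LR by
+        # cosine_annealing(step, T, 1, 1e-6 / lr) with
+        # T = num_epochs * len(train_loader), so the effective LR decays
+        # smoothly from config.optimizer.lr at step 0 to ~1e-6 at step T;
+        # e.g. for lr = 0.1: step 0 -> 0.1, step T/2 -> ~0.05, step T -> 1e-6.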
+        self.net.train()
+
+        loss_avg = 0.0
+        train_dataiter = iter(self.train_loader)
+
+        for train_step in tqdm(range(1,
+                                     len(train_dataiter) + 1),
+                               desc='Epoch {:03d}: '.format(epoch_idx),
+                               position=0,
+                               leave=True,
+                               disable=not comm.is_main_process()):
+            batch = next(train_dataiter)
+            data = batch['data'].cuda()
+            target = batch['label'].cuda()
+
+            # perform cutmix augmentation in a batch
+            r = np.random.rand(1)
+            if self.args.beta > 0 and r < self.args.cutmix_prob:
+                # generate mixed sample
+                lam = np.random.beta(self.args.beta, self.args.beta)
+                rand_index = torch.randperm(data.size()[0]).cuda()
+                target_a = target
+                target_b = target[rand_index]
+                bbx1, bby1, bbx2, bby2 = rand_bbox(data.size(), lam)
+                data[:, :, bbx1:bbx2, bby1:bby2] = data[rand_index, :,
+                                                        bbx1:bbx2, bby1:bby2]
+                # adjust lambda to exactly match pixel ratio
+                lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) /
+                           (data.size()[-1] * data.size()[-2]))
+                # forward
+                logits_classifier = self.net(data)
+                loss = F.cross_entropy(
+                    logits_classifier, target_a) * lam + F.cross_entropy(
+                        logits_classifier, target_b) * (1. - lam)
+            else:
+                # forward
+                logits_classifier = self.net(data)
+                loss = F.cross_entropy(logits_classifier, target)
+
+            # backward
+            self.optimizer.zero_grad()
+            loss.backward()
+            self.optimizer.step()
+            self.scheduler.step()
+
+            # exponential moving average, show smooth values
+            with torch.no_grad():
+                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
+
+        metrics = {}
+        metrics['epoch_idx'] = epoch_idx
+        metrics['loss'] = loss_avg
+
+        return self.net, metrics
+
+
+def rand_bbox(size, lam):
+    W = size[2]
+    H = size[3]
+    cut_rat = np.sqrt(1. - lam)
+    cut_w = int(W * cut_rat)  # np.int is removed in NumPy >= 1.24
+    cut_h = int(H * cut_rat)
+
+    # uniform
+    cx = np.random.randint(W)
+    cy = np.random.randint(H)
+
+    bbx1 = np.clip(cx - cut_w // 2, 0, W)
+    bby1 = np.clip(cy - cut_h // 2, 0, H)
+    bbx2 = np.clip(cx + cut_w // 2, 0, W)
+    bby2 = np.clip(cy + cut_h // 2, 0, H)
+
+    return bbx1, bby1, bbx2, bby2
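[editor's note] A minimal sketch of the CutMix step above, assuming this
module is importable as openood.trainers.cutmix_trainer; the batch shapes,
class count, and beta value are illustrative only:

    import numpy as np
    import torch
    import torch.nn.functional as F
    from openood.trainers.cutmix_trainer import rand_bbox

    x = torch.randn(8, 3, 32, 32)    # dummy image batch (stand-in for real data)
    y = torch.randint(0, 10, (8, ))  # dummy labels for 10 classes
    lam = np.random.beta(1.0, 1.0)   # beta = 1.0 as an example
    perm = torch.randperm(x.size(0))
    bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
    # paste a random box from a permuted copy of the batch
    x[:, :, bbx1:bbx2, bby1:bby2] = x[perm, :, bbx1:bbx2, bby1:bby2]
    # recompute lam as the exact fraction of unmixed pixels
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size(-1) * x.size(-2)))
    logits = torch.randn(8, 10)      # stand-in for self.net(x)
    loss = lam * F.cross_entropy(logits, y) \
        + (1 - lam) * F.cross_entropy(logits, y[perm])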
diff --git a/OpenOOD/openood/trainers/cutpaste_trainer.py b/OpenOOD/openood/trainers/cutpaste_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..19813954d6384308caf355e372ea5a49716c14a4
--- /dev/null
+++ b/OpenOOD/openood/trainers/cutpaste_trainer.py
@@ -0,0 +1,80 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+from openood.utils import Config
+
+from .lr_scheduler import cosine_annealing
+
+
+class CutPasteTrainer:
+    def __init__(self, net: nn.Module, train_loader: DataLoader,
+                 config: Config) -> None:
+
+        self.net = net
+        self.train_loader = train_loader
+        self.config = config
+
+        self.optimizer = torch.optim.SGD(
+            net.parameters(),
+            config.optimizer.lr,
+            momentum=config.optimizer.momentum,
+            weight_decay=config.optimizer.weight_decay,
+            nesterov=True,
+        )
+
+        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
+            self.optimizer,
+            lr_lambda=lambda step: cosine_annealing(
+                step,
+                config.optimizer.num_epochs * len(train_loader),
+                1,
+                1e-6 / config.optimizer.lr,
+            ),
+        )
+
+    def train_epoch(self, epoch_idx):
+        self.net.train()
+
+        loss_avg = 0.0
+        train_dataiter = iter(self.train_loader)
+
+        embeds = []
+
+        for train_step in tqdm(range(1,
+                                     len(train_dataiter) + 1),
+                               desc='Epoch {:03d}: '.format(epoch_idx),
+                               position=0,
+                               leave=True):
+            batch = next(train_dataiter)
+            data = torch.cat(batch['data'], 0)  # [normal, cutpaste-augmented]
+            data = data.cuda()
+            y = torch.arange(2)  # label 0 = normal, 1 = augmented
+            y = y.repeat_interleave(len(batch['data'][0]))
+            y = y.cuda()
+            # forward
+            embed, logits_classifier = self.net(data)
+            loss = F.cross_entropy(logits_classifier, y)
+
+            embeds.append(embed.cuda())
+
+            # backward
+            self.optimizer.zero_grad()
+            loss.backward()
+            self.optimizer.step()
+            self.scheduler.step()
+
+            # exponential moving average, show smooth values
+            with torch.no_grad():
+                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
+
+        embeds = torch.cat(embeds)
+        embeds = torch.nn.functional.normalize(embeds, p=2, dim=1)
+
+        metrics = {}
+        metrics['epoch_idx'] = epoch_idx
+        metrics['loss'] = loss_avg
+
+        return self.net, metrics
diff --git a/OpenOOD/openood/trainers/draem_trainer.py b/OpenOOD/openood/trainers/draem_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7501ca3ca9ab7fd235558d7f0186b8f3fc4eba9
--- /dev/null
+++ b/OpenOOD/openood/trainers/draem_trainer.py
@@ -0,0 +1,96 @@
+import torch
+from torch import optim
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+from openood.losses.draem_loss import get_draem_losses
+from openood.utils import Config
+
+
+def weights_init(m):
+    classname = m.__class__.__name__
+    if classname.find('Conv') != -1:
+        m.weight.data.normal_(0.0, 0.02)
+    elif classname.find('BatchNorm') != -1:
+        m.weight.data.normal_(1.0, 0.02)
+        m.bias.data.fill_(0)
+
+
+class DRAEMTrainer:
+    def __init__(self, net, train_loader: DataLoader, config: Config) -> None:
+        self.config = config
+        self.net = net
+        self.net['generative'].apply(weights_init)
+        self.net['discriminative'].apply(weights_init)
+        self.train_loader = train_loader
+
+        self.optimizer = torch.optim.Adam([{
+            'params':
+            self.net['generative'].parameters(),
+            'lr':
+            self.config.optimizer.lr
+        }, {
+            'params':
+            self.net['discriminative'].parameters(),
+            'lr':
+            self.config.optimizer.lr
+        }])
+
+        steps = []
+        for step in self.config.optimizer.steps:
+            steps.append(self.config.optimizer.num_epochs * step)
+
+        self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
+                                                        steps,
+                                                        gamma=0.2,
+                                                        last_epoch=-1)
+
+        self.losses = get_draem_losses()
+
+    def train_epoch(self, epoch_idx):
+        self.net['generative'].train()
+        self.net['discriminative'].train()
+
+        loss_avg = 0.0
+        train_dataiter = iter(self.train_loader)
+
+        for train_step in tqdm(range(1,
+                                     len(train_dataiter) + 1),
+                               desc='Epoch {:03d}: '.format(epoch_idx),
+                               position=0,
+                               leave=True):
+            sample_batched = next(train_dataiter)
+            gray_batch = sample_batched['data']['image'].cuda()
+            aug_gray_batch = sample_batched['data']['augmented_image'].cuda()
+            anomaly_mask = sample_batched['data']['anomaly_mask'].cuda()
+
+            # forward
+            gray_rec = self.net['generative'](aug_gray_batch)
+            # concatenate the reconstruction with the augmented input
+            joined_in = torch.cat((gray_rec, aug_gray_batch), dim=1)
+
+            out_mask = self.net['discriminative'](joined_in)
+            out_mask_sm = torch.softmax(out_mask, dim=1)
+
+            l2_loss = self.losses['l2'](gray_rec, gray_batch)
+            ssim_loss = self.losses['ssim'](gray_rec, gray_batch)
+
+            segment_loss = self.losses['focal'](out_mask_sm, anomaly_mask)
+            loss = l2_loss + ssim_loss + segment_loss
+
+            # backward
+            self.optimizer.zero_grad()
+            loss.backward()
+            self.optimizer.step()
+
+            # exponential moving average, show smooth values
+            with torch.no_grad():
+                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
+
+        self.scheduler.step()
+        metrics = {}
+        metrics['epoch_idx'] = epoch_idx
+        metrics['loss_smoothed'] = loss_avg
+        metrics['loss'] = loss
+
+        return self.net, metrics
diff --git a/OpenOOD/openood/trainers/dropout_trainer.py b/OpenOOD/openood/trainers/dropout_trainer.py
new file mode 100644
index
0000000000000000000000000000000000000000..f36199027aa7a2d9eb135937fa76b1dcc4a0a9f1 --- /dev/null +++ b/OpenOOD/openood/trainers/dropout_trainer.py @@ -0,0 +1,72 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + +class DropoutTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + self.p = config.trainer.dropout_p + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True): + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + logits_classifier = self.net.forward_with_dropout(data, self.p) + loss = F.cross_entropy(logits_classifier, target) + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg + + return self.net, metrics diff --git a/OpenOOD/openood/trainers/dsvdd_trainer.py b/OpenOOD/openood/trainers/dsvdd_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..b58317595597b2c2fe0b0256a8d459f1bf0ff789 --- /dev/null +++ b/OpenOOD/openood/trainers/dsvdd_trainer.py @@ -0,0 +1,136 @@ +import numpy as np +import torch +import torch.optim as optim +from tqdm import tqdm + +from openood.utils import Config + + +class AETrainer: + def __init__(self, net, train_loader, config: Config): + self.config = config + self.net = net + self.train_loader = train_loader + if config.optimizer.name == 'adam': + self.optimizer = optim.Adam( + net.parameters(), + lr=config.lr, + weight_decay=config.weight_decay, + amsgrad=config.optimizer.name == 'amsgrad') + self.scheduler = optim.lr_scheduler.MultiStepLR( + self.optimizer, milestones=config.lr_milestones, gamma=0.1) + + def train_epoch(self, epoch_idx): + + self.net.train() + epoch_loss = 0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d} '.format(epoch_idx), + position=0, + leave=True): + batch = next(train_dataiter) + inputs = batch['data'].cuda() + self.optimizer.zero_grad() + outputs = self.net(inputs) + scores = torch.sum((outputs - inputs)**2, + dim=tuple(range(1, outputs.dim()))) + loss = torch.mean(scores) + loss.backward() + self.optimizer.step() + self.scheduler.step() + epoch_loss += loss.item() + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = epoch_loss + return self.net, metrics + + +class DSVDDTrainer: + def __init__(self, net, train_loader, config: Config) -> None: + self.config = config + self.net = net + self.train_loader = 
train_loader + if config.optimizer.name == 'adam': + self.optimizer = optim.Adam( + net.parameters(), + lr=config.lr, + weight_decay=config.weight_decay, + amsgrad=config.optimizer.name == 'amsgrad') + self.scheduler = optim.lr_scheduler.MultiStepLR( + self.optimizer, milestones=config.lr_milestones, gamma=0.1) + + if self.config.c == 'None' and self.config.network.name != 'dcae': + self.config.c = init_center_c(train_loader, net) + self.c = self.config.c + + def train_epoch(self, epoch_idx): + self.net.train() + epoch_loss = 0 + train_dataiter = iter(self.train_loader) + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}'.format(epoch_idx), + position=0, + leave=True): + batch = next(train_dataiter) + inputs = batch['data'].cuda() + self.optimizer.zero_grad() + outputs = self.net(inputs) + if self.config.network.name != 'dcae': + scores = torch.sum((outputs - self.c)**2, dim=1) + + # this is for pre-training the dcae network from the original paper + elif self.config.network.name == 'dcae': + scores = torch.sum((outputs - inputs)**2, + dim=tuple(range(1, outputs.dim()))) + else: + raise NotImplementedError + loss = torch.mean(scores) + loss.backward() + self.optimizer.step() + self.scheduler.step() + epoch_loss += loss.item() + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = epoch_loss + return self.net, metrics + + +def init_center_c(train_loader, net, eps=0.1): + """Initialize hypersphere center c as the mean from an initial forward pass + on the data.""" + n_samples = 0 + first_iter = True + train_dataiter = iter(train_loader) + net.eval() + with torch.no_grad(): + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Initialize center', + position=0, + leave=True): + batch = next(train_dataiter) + inputs = batch['data'].cuda() + outputs = net(inputs) + if first_iter: + c = torch.zeros(outputs.shape[1]).cuda() + first_iter = False + n_samples += outputs.shape[0] + c += torch.sum(outputs, dim=0) + + c /= n_samples + + # If c_i is too close to 0, set to +-eps. + # Reason: a zero unit can be trivially matched with zero weights. 
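+    # [editor's note, illustrative, not part of the original patch] e.g.
+    # with eps = 0.1, c = [0.03, -0.02, 0.5] becomes [0.1, -0.1, 0.5]:
+    # near-zero components are pushed away from zero without changing sign.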
+    c[(abs(c) < eps) & (c < 0)] = -eps
+    c[(abs(c) < eps) & (c > 0)] = eps
+
+    return c
+
+
+def get_radius(dist: torch.Tensor, nu: float):
+    """Optimally solve for radius R via the (1-nu)-quantile of distances."""
+    return np.quantile(np.sqrt(dist.clone().data.cpu().numpy()), 1 - nu)
diff --git a/OpenOOD/openood/trainers/godin_trainer.py b/OpenOOD/openood/trainers/godin_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..630776281c2537430f0694a21572451cd7a691a2
--- /dev/null
+++ b/OpenOOD/openood/trainers/godin_trainer.py
@@ -0,0 +1,106 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+import openood.utils.comm as comm
+from openood.utils import Config
+
+from .lr_scheduler import cosine_annealing
+
+
+class GodinTrainer:
+    def __init__(self, net: nn.Module, train_loader: DataLoader,
+                 config: Config) -> None:
+
+        parameters = []
+        h_parameters = []
+        for name, parameter in net.named_parameters():
+            if name in [
+                    'h.h.weight', 'h.h.bias', 'module.h.h.weight',
+                    'module.h.h.bias'
+            ]:
+                h_parameters.append(parameter)
+            else:
+                parameters.append(parameter)
+
+        self.net = net
+        self.train_loader = train_loader
+        self.config = config
+
+        self.optimizer = torch.optim.SGD(
+            parameters,
+            config.optimizer.lr,
+            momentum=config.optimizer.momentum,
+            weight_decay=config.optimizer.weight_decay,
+            nesterov=True,
+        )
+
+        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
+            self.optimizer,
+            lr_lambda=lambda step: cosine_annealing(
+                step,
+                config.optimizer.num_epochs * len(train_loader),
+                1,
+                1e-6 / config.optimizer.lr,
+            ),
+        )
+
+        # no weight decay for the h parameters
+        self.h_optimizer = torch.optim.SGD(
+            h_parameters,
+            config.optimizer.lr,
+            momentum=config.optimizer.momentum,
+            nesterov=True,
+        )
+
+        # same schedule as the main optimizer
+        self.h_scheduler = torch.optim.lr_scheduler.LambdaLR(
+            self.h_optimizer,
+            lr_lambda=lambda step: cosine_annealing(
+                step,
+                config.optimizer.num_epochs * len(train_loader),
+                1,
+                1e-6 / config.optimizer.lr,
+            ),
+        )
+
+    def train_epoch(self, epoch_idx):
+        self.net.train()
+
+        loss_avg = 0.0
+        train_dataiter = iter(self.train_loader)
+
+        for train_step in tqdm(range(1,
+                                     len(train_dataiter) + 1),
+                               desc='Epoch {:03d}: '.format(epoch_idx),
+                               position=0,
+                               leave=True,
+                               disable=not comm.is_main_process()):
+            batch = next(train_dataiter)
+            data = batch['data'].cuda()
+            target = batch['label'].cuda()
+
+            # forward
+            logits_classifier = self.net(data)
+            loss = F.cross_entropy(logits_classifier, target)
+
+            # backward
+            self.optimizer.zero_grad()
+            self.h_optimizer.zero_grad()
+            loss.backward()
+            self.optimizer.step()
+            self.h_optimizer.step()
+            self.scheduler.step()
+            self.h_scheduler.step()
+
+            # exponential moving average, show smooth values
+            with torch.no_grad():
+                loss_avg = loss_avg * 0.8 + float(loss) * 0.2
+
+        metrics = {}
+        metrics['epoch_idx'] = epoch_idx
+        metrics['loss'] = loss_avg
+
+        return self.net, metrics
diff --git a/OpenOOD/openood/trainers/ish_trainer.py b/OpenOOD/openood/trainers/ish_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f77b430f75d4b042029f94beb651e752fe6b1ec7
--- /dev/null
+++ b/OpenOOD/openood/trainers/ish_trainer.py
@@ -0,0 +1,247 @@
+import numpy as np
+import sys
+import torch
+import torch.nn as nn
+from functools import partial
+
+from torch.autograd import Function
+from torch.functional import F
+
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+import openood.utils.comm as comm
+from
openood.utils import Config +from .lr_scheduler import cosine_annealing + +import subprocess +import importlib.util + + +class ISHTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + self.net = net + self.train_loader = train_loader + self.config = config + self.optimizer = torch.optim.SGD( + [{'params': list(net.parameters())[: -2]}, {'params': list(net.parameters())[-2:], 'weight_decay': config.optimizer.weight_decay_fc}], + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=config.optimizer.nesterov, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + self.net = to_ish(self.net, strategy=config.trainer.trainer_args.mode, param=config.trainer.trainer_args.param, layer=config.trainer.trainer_args.layer) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + + train_dataiter = iter(self.train_loader) + + + with tqdm(range(1, len(train_dataiter) + 1), desc='Epoch {:03d}'.format(epoch_idx), position=0, + leave=True, disable=not comm.is_main_process()) as tepoch: + + for train_step in tepoch: + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + logits_classifier, feature = self.net(data, return_feature=True) + loss = F.cross_entropy(logits_classifier, target) + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + # comm.synchronize() + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + + return self.net, metrics + + def save_metrics(self, loss_avg): + all_loss = comm.gather(loss_avg) + total_losses_reduced = np.mean([x for x in all_loss]) + + return total_losses_reduced + + + + +class _ISHTLinear(Function): + @staticmethod + def forward( + ctx, + x: torch.Tensor, + weight: nn.Parameter, + bias: nn.Parameter, + ish_reshaper + ): + ctx.ish_reshaper = ish_reshaper + ctx.x_shape = x.shape + ctx.has_bias = bias is not None + ctx.save_for_backward(ish_reshaper.select(x, ctx), weight) + return F.linear(x, weight, bias) + + @staticmethod + def backward(ctx, grad_output: torch.Tensor): + x, weight = ctx.saved_tensors + grad_bias = torch.sum(grad_output, list(range(grad_output.dim()-1))) if ctx.has_bias else None + ic, oc = weight.shape + x = ctx.ish_reshaper.pad(x, ctx) + grad_weight = grad_output.view(-1,ic).T.mm(x.view(-1,oc)) + grad_input = torch.matmul(grad_output, weight, out=x.view(ctx.x_shape)) + return grad_input, grad_weight, grad_bias, None + +_linear_forward = _ISHTLinear.apply + +def linear_forward(self, x): + if self.training: + x = _linear_forward(x, self.weight, self.bias, self.ish_reshaper) + else: + x = F.linear(x, self.weight, self.bias) + return x + +supports = { + nn.Linear: linear_forward, +} + +class ISHReshaper(object): + def __init__(self, strategy, param): + self.param = param + self.reserve = 1 - param + + self.select = getattr(self, f"cache_{strategy}") + self.pad = getattr(self, f"load_{strategy}") + + def cache_minksample_expscale(self, x: torch.Tensor, ctx=None): + shape = x.shape + x = x.reshape(shape[0], -1) + # calculate the sum of the input per sample + s1 = x.sum(dim=[1]) + 
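+        # [editor's note, illustrative, not part of the original patch]
+        # The topk below keeps the (1 - param) fraction of entries with the
+        # largest magnitude, then the kept values are sharpened by exp(s1/s2):
+        # e.g. x = [4, 3, 2, 1] with param = 0.5 keeps [4, 3]; s1 = 10,
+        # s2 = 7, so the kept values are multiplied by exp(10 / 7).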
+ x, idxs = x.abs().topk(int(x.shape[1] * self.reserve), dim=1, sorted=False) + x.dropped = True # provide a flag for act judges + + # calculate new sum of the input per sample after pruning + s2 = x.sum(dim=[1]) + + # apply sharpening + scale = s1 / s2 + x = x * torch.exp(scale[:, None]) + + ctx.idxs = idxs + ctx.shape = shape + return x + + def load_minksample_expscale(self, x, ctx=None): + return torch.zeros( + ctx.shape, device=x.device, dtype=x.dtype + ).scatter_(1, ctx.idxs, x) + + def cache_expscale(self, x: torch.Tensor, ctx=None): + input = x.clone() + shape = x.shape + x = x.reshape(shape[0], -1) + # calculate the sum of the input per sample + s1 = x.sum(dim=[1]) + + x, idxs = x.abs().topk(int(x.shape[1] * self.reserve), dim=1, sorted=False) + x.dropped = True # provide a flag for act judges + + # calculate new sum of the input per sample after pruning + s2 = x.sum(dim=[1]) + + # apply sharpening + scale = s1 / s2 + + if len(shape) == 4: + input = input * torch.exp(scale[:, None, None, None]) + elif len(shape) == 2: + input = input * torch.exp(scale[:, None]) + else: + raise NotImplementedError + + ctx.idxs = idxs + ctx.shape = shape + return input + + def load_expscale(self, x, ctx=None): + return x + + + def cache_minksample_lnscale(self, x: torch.Tensor, ctx=None): + shape = x.shape + x = x.reshape(shape[0], -1) + # calculate the sum of the input per sample + s1 = x.sum(dim=[1]) + + x, idxs = x.abs().topk(int(x.shape[1] * self.reserve), dim=1, sorted=False) + x.dropped = True # provide a flag for act judges + + # calculate new sum of the input per sample after pruning + s2 = x.sum(dim=[1]) + + # apply sharpening + scale = s1 / s2 + x = x * scale[:, None] + + ctx.idxs = idxs + ctx.shape = shape + return x + + def load_minksample_lnscale(self, x, ctx=None): + return torch.zeros( + ctx.shape, device=x.device, dtype=x.dtype + ).scatter_(1, ctx.idxs, x) + + @staticmethod + def transfer(model, strategy, gamma, autocast): + _type = type(model) + ish_reshaper = ISHReshaper(strategy, gamma) + model.forward = partial(supports[_type], model) + model.ish_reshaper = ish_reshaper + print(f"{_type}.forward => ish.{strategy}.{_type}.forward") + + for child in model.children(): + ISHReshaper.transfer(child, strategy, gamma, autocast) + return model + + +def to_ish(model: nn.Module, strategy: str, param: float, autocast: bool = False, layer = None): + if layer == "r1": + if hasattr(model, 'module'): + ISHReshaper.transfer(model.module.fc, strategy, param, autocast) + else: + ISHReshaper.transfer(model.fc, strategy, param, autocast) + + elif layer == "all": + ISHReshaper.transfer(model, strategy, param, autocast) + + return model + + + diff --git a/OpenOOD/openood/trainers/kdad_trainer.py b/OpenOOD/openood/trainers/kdad_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..c8df16cbda475337de2944ee7d51592bac963735 --- /dev/null +++ b/OpenOOD/openood/trainers/kdad_trainer.py @@ -0,0 +1,65 @@ +import torch +from torch.autograd import Variable +from tqdm import tqdm + +from openood.losses.kdad_losses import DirectionOnlyLoss, MseDirectionLoss +from openood.utils import Config + + +class KdadTrainer: + def __init__(self, net, train_loader, config: Config): + self.vgg = net['vgg'] + self.model = net['model'] + self.train_loader = train_loader + self.config = config + # choose loss type + if self.config['direction_loss_only']: + self.criterion = DirectionOnlyLoss() + else: + self.criterion = MseDirectionLoss(self.config['lamda']) + self.optimizer = 
torch.optim.Adam(self.model.parameters(), + lr=float( + self.config['learning_rate'])) + + def train_epoch(self, epoch_idx): + + self.model.train() + epoch_loss = 0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}'.format(epoch_idx), + position=0, + leave=True): + batch = next(train_dataiter) + X = batch['data'] + if X.shape[1] == 1: + X = X.repeat(1, 3, 1, 1) + X = Variable(X).cuda() + + # compute respective output + output_pred = self.model.forward(X) + output_real = self.vgg(X) + + # compute loss + total_loss = self.criterion(output_pred, output_real) + + # Add loss to the list + epoch_loss += total_loss.item() + + # Clear the previous gradients + self.optimizer.zero_grad() + + # Compute gradients + total_loss.backward() + + # Adjust weights + self.optimizer.step() + net = {} + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['epoch_loss'] = epoch_loss + net['vgg'] = self.vgg + net['model'] = self.model + return net, metrics diff --git a/OpenOOD/openood/trainers/logitnorm_trainer.py b/OpenOOD/openood/trainers/logitnorm_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..aaa14b6f7ee5b5c9b4d85b56724aafc367c4ff66 --- /dev/null +++ b/OpenOOD/openood/trainers/logitnorm_trainer.py @@ -0,0 +1,95 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + +class LogitNormTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + self.loss_fn = LogitNormLoss(tau=config.trainer.trainer_args.tau) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + logits_classifier = self.net(data) + loss = self.loss_fn(logits_classifier, target) + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + # comm.synchronize() + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics + + def save_metrics(self, loss_avg): + all_loss = comm.gather(loss_avg) + total_losses_reduced = np.mean([x for x in all_loss]) + + return total_losses_reduced + + +class LogitNormLoss(nn.Module): + def __init__(self, tau=0.04): + super(LogitNormLoss, self).__init__() + self.tau = tau + + def forward(self, x, target): + norms = torch.norm(x, p=2, dim=-1, keepdim=True) + 1e-7 + logit_norm = 
torch.div(x, norms) / self.tau + return F.cross_entropy(logit_norm, target) diff --git a/OpenOOD/openood/trainers/lr_scheduler.py b/OpenOOD/openood/trainers/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..32d54ffec10a167e942d04c2b7c9d0a489bd757b --- /dev/null +++ b/OpenOOD/openood/trainers/lr_scheduler.py @@ -0,0 +1,6 @@ +import numpy as np + + +def cosine_annealing(step, total_steps, lr_max, lr_min): + return lr_min + (lr_max - lr_min) * 0.5 * \ + (1 + np.cos(step / total_steps * np.pi)) diff --git a/OpenOOD/openood/trainers/mcd_trainer.py b/OpenOOD/openood/trainers/mcd_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9b37ee4cb2c0940313562214f9b7205467c07f --- /dev/null +++ b/OpenOOD/openood/trainers/mcd_trainer.py @@ -0,0 +1,95 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .base_trainer import BaseTrainer + + +class MCDTrainer(BaseTrainer): + def __init__( + self, + net: nn.Module, + train_loader: DataLoader, + train_unlabeled_loader: DataLoader, + config: Config, + ) -> None: + super().__init__(net, train_loader, config) + self.train_unlabeled_loader = train_unlabeled_loader + self.lambda_oe = config.trainer.lambda_oe + self.margin = config.trainer.margin + self.epoch_ft = config.trainer.start_epoch_ft + + def train_epoch(self, epoch_idx): + self.net.train() # enter train mode + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + if self.train_unlabeled_loader: + unlabeled_dataiter = iter(self.train_unlabeled_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + + data = batch['data'].cuda() + if epoch_idx < self.epoch_ft: + logits1, logits2 = self.net(data, return_double=True) + loss = F.cross_entropy(logits1, batch['label'].cuda()) \ + + F.cross_entropy(logits2, batch['label'].cuda()) + + elif self.train_unlabeled_loader and epoch_idx >= self.epoch_ft: + try: + unlabeled_batch = next(unlabeled_dataiter) + except StopIteration: + unlabeled_dataiter = iter(self.train_unlabeled_loader) + unlabeled_batch = next(unlabeled_dataiter) + + id_bs = data.size(0) + + unlabeled_data = unlabeled_batch['data'].cuda() + all_data = torch.cat([data, unlabeled_data]) + logits1, logits2 = self.net(all_data, return_double=True) + + logits1_id, logits2_id = logits1[:id_bs], logits2[:id_bs] + logits1_ood, logits2_ood = logits1[id_bs:], logits2[id_bs:] + + loss = F.cross_entropy(logits1_id, batch['label'].cuda()) \ + + F.cross_entropy(logits2_id, batch['label'].cuda()) + + ent = torch.mean(entropy(logits1_ood) - entropy(logits2_ood)) + loss_oe = F.relu(self.margin - ent) + + loss += self.lambda_oe * loss_oe + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics + + +def entropy(logits): + score = torch.softmax(logits, dim=0) + logscore = torch.log(score) + entropy = torch.sum(-score * logscore, dim=0) + return entropy diff --git a/OpenOOD/openood/trainers/mixoe_trainer.py 
b/OpenOOD/openood/trainers/mixoe_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..49ba1f267ed73d115caf0361ddf39970d28c9a42 --- /dev/null +++ b/OpenOOD/openood/trainers/mixoe_trainer.py @@ -0,0 +1,154 @@ +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .base_trainer import BaseTrainer + + +class MixOETrainer(BaseTrainer): + def __init__( + self, + net: nn.Module, + train_loader: DataLoader, + train_unlabeled_loader: DataLoader, + config: Config, + ) -> None: + super().__init__(net, train_loader, config) + self.train_unlabeled_loader = train_unlabeled_loader + self.lambda_oe = config.trainer.lambda_oe + self.alpha = config.trainer.alpha + self.beta = config.trainer.beta + self.mix_op = config.trainer.mix_op + self.num_classes = config.dataset.num_classes + self.criterion = SoftCE() + + def train_epoch(self, epoch_idx): + self.net.train() # enter train mode + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + if self.train_unlabeled_loader: + unlabeled_dataiter = iter(self.train_unlabeled_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + # manually drop last batch to avoid batch size mismatch + if train_step == len(train_dataiter): + continue + + batch = next(train_dataiter) + + try: + unlabeled_batch = next(unlabeled_dataiter) + except StopIteration: + unlabeled_dataiter = iter(self.train_unlabeled_loader) + unlabeled_batch = next(unlabeled_dataiter) + + if len(unlabeled_batch['data']) < len(batch['data']): + unlabeled_dataiter = iter(self.train_unlabeled_loader) + unlabeled_batch = next(unlabeled_dataiter) + + x, y = batch['data'].cuda(), batch['label'].cuda() + oe_x = unlabeled_batch['data'].cuda() + bs = x.size(0) + one_hot_y = torch.zeros(bs, self.num_classes).cuda() + one_hot_y.scatter_(1, y.view(-1, 1), 1) + + # ID loss + logits = self.net(x) + id_loss = F.cross_entropy(logits, y) + + # MixOE loss + # build mixed samples + lam = np.random.beta(self.alpha, self.beta) + + if self.mix_op == 'cutmix': + mixed_x = x.clone().detach() + bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam) + # adjust lambda to exactly match pixel ratio + lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / + (x.size()[-1] * x.size()[-2])) + # we empirically find that pasting outlier patch into ID data performs better + # than pasting ID patch into outlier data + mixed_x[:, :, bbx1:bbx2, bby1:bby2] = oe_x[:, :, bbx1:bbx2, + bby1:bby2] + elif self.mix_op == 'mixup': + mixed_x = lam * x + (1 - lam) * oe_x + + # construct soft labels and compute loss + oe_y = torch.ones(oe_x.size(0), + self.num_classes).cuda() / self.num_classes + soft_labels = lam * one_hot_y + (1 - lam) * oe_y + mixed_loss = self.criterion(self.net(mixed_x), soft_labels) + + # Total loss + loss = id_loss + self.lambda_oe * mixed_loss + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics + + +class SoftCE(nn.Module): + def __init__(self, reduction='mean'): + super(SoftCE, self).__init__() + 
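+        # [editor's note, not part of the original patch] soft cross-entropy:
+        # loss_i = -sum_c q_ic * log_softmax(logits_i)_c, which reduces to
+        # standard cross-entropy when q is one-hot; MixOE calls it with
+        # q = lam * one_hot(y) + (1 - lam) * uniform.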
self.reduction = reduction + + def forward(self, logits, soft_targets): + preds = logits.log_softmax(dim=-1) + assert preds.shape == soft_targets.shape + + loss = torch.sum(-soft_targets * preds, dim=-1) + + if self.reduction == 'mean': + return torch.mean(loss) + elif self.reduction == 'sum': + return torch.sum(loss) + elif self.reduction == 'none': + return loss + else: + raise ValueError("Reduction type '{:s}' is not supported!".format( + self.reduction)) + + +def rand_bbox(size, lam): + W = size[2] + H = size[3] + cut_rat = np.sqrt(1. - lam) + cut_w = int(W * cut_rat) + cut_h = int(H * cut_rat) + + # uniform + cx = np.random.randint(W) + cy = np.random.randint(H) + + bbx1 = np.clip(cx - cut_w // 2, 0, W) + bby1 = np.clip(cy - cut_h // 2, 0, H) + bbx2 = np.clip(cx + cut_w // 2, 0, W) + bby2 = np.clip(cy + cut_h // 2, 0, H) + + return bbx1, bby1, bbx2, bby2 diff --git a/OpenOOD/openood/trainers/mixup_trainer.py b/OpenOOD/openood/trainers/mixup_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..7c59f6e2ec650aba3dbe0ff35485af141e63706a --- /dev/null +++ b/OpenOOD/openood/trainers/mixup_trainer.py @@ -0,0 +1,98 @@ +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.losses import soft_cross_entropy +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + +def prepare_mixup(batch, alpha=1.0, use_cuda=True): + """Returns mixed inputs, pairs of targets, and lambda.""" + if alpha > 0: + lam = np.random.beta(alpha, alpha) + else: + lam = 1 + + batch_size = batch['data'].size()[0] + if use_cuda: + index = torch.randperm(batch_size).cuda() + else: + index = torch.randperm(batch_size) + + return index, lam + + +def mixing(data, index, lam): + return lam * data + (1 - lam) * data[index] + + +class MixupTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + self.alpha = self.config.trainer.trainer_args.alpha + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + + # mixup operation + index, lam = prepare_mixup(batch, self.alpha) + data_mix = mixing(batch['data'].cuda(), index, lam) + soft_label_mix = mixing(batch['soft_label'].cuda(), index, lam) + + # forward + logits_classifier = self.net(data_mix) + loss = soft_cross_entropy(logits_classifier, soft_label_mix) + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg + + return self.net, metrics diff --git 
a/OpenOOD/openood/trainers/mos_trainer.py b/OpenOOD/openood/trainers/mos_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..ff6e252825c1b370d97d72258073ec33b752f62b --- /dev/null +++ b/OpenOOD/openood/trainers/mos_trainer.py @@ -0,0 +1,331 @@ +from copy import deepcopy + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + + +def get_mixup(dataset_size): + return 0.0 if dataset_size < 20_000 else 0.1 + + +def get_group_slices(classes_per_group): + group_slices = [] + start = 0 + for num_cls in classes_per_group: + end = start + num_cls + 1 + group_slices.append([start, end]) + start = end + return torch.LongTensor(group_slices) + + +def get_schedule(dataset_size): + if dataset_size < 20_000: + return [100, 200, 300, 400, 500] + elif dataset_size < 500_000: + return [500, 3000, 6000, 9000, 10_000] + else: + return [500, 6000, 12_000, 18_000, 20_000] + + +def get_lr(step, dataset_size, base_lr=0.003): + """Returns learning-rate for `step` or None at the end.""" + supports = get_schedule(dataset_size) + # Linear warmup + if step < supports[0]: + return base_lr * step / supports[0] + # End of training + elif step >= supports[-1]: + return None + # Staircase decays by factor of 10 + else: + for s in supports[1:]: + if s < step: + base_lr /= 10 + return base_lr + + +def mixup_data(x, y, lam): + """Returns mixed inputs, pairs of targets, and lambda.""" + indices = torch.randperm(x.shape[0]).to(x.device) + + mixed_x = lam * x + (1 - lam) * x[indices] + y_a, y_b = y, y[indices] + return mixed_x, y_a, y_b + + +def mixup_criterion_group(criterion, pred, y_a, y_b, lam, group_slices): + return lam * calc_group_softmax_loss(criterion, pred, y_a, group_slices) \ + + (1 - lam) * calc_group_softmax_loss(criterion, + pred, y_b, group_slices) + + +def calc_group_softmax_loss(criterion, logits, labels, group_slices): + num_groups = group_slices.shape[0] + loss = 0 + for i in range(num_groups): + group_logit = logits[:, group_slices[i][0]:group_slices[i][1]] + group_label = labels[:, i] + + loss += criterion(group_logit, group_label) + + return loss + + +def calc_group_softmax_acc(logits, labels, group_slices): + num_groups = group_slices.shape[0] + loss = 0 + num_samples = logits.shape[0] + + all_group_max_score, all_group_max_class = [], [] + + smax = torch.nn.Softmax(dim=-1).cuda() + cri = torch.nn.CrossEntropyLoss(reduction='none').cuda() + + for i in range(num_groups): + group_logit = logits[:, group_slices[i][0]:group_slices[i][1]] + group_label = labels[:, i] + loss += cri(group_logit, group_label) + + group_softmax = smax(group_logit) + group_softmax = group_softmax[:, 1:] # disregard others category + group_max_score, group_max_class = torch.max(group_softmax, dim=1) + group_max_class += 1 # shift the class index by 1 + + all_group_max_score.append(group_max_score) + all_group_max_class.append(group_max_class) + + all_group_max_score = torch.stack(all_group_max_score, dim=1) + all_group_max_class = torch.stack(all_group_max_class, dim=1) + + final_max_score, max_group = torch.max(all_group_max_score, dim=1) + + pred_cls_within_group = all_group_max_class[torch.arange(num_samples), + max_group] + + gt_class, gt_group = torch.max(labels, dim=1) + + selected_groups = (max_group == gt_group) + + pred_acc = torch.zeros(logits.shape[0]).bool().cuda() + + pred_acc[selected_groups] = ( + pred_cls_within_group[selected_groups] == 
gt_class[selected_groups]) + + return loss, pred_acc + + +def topk(output, target, ks=(1, )): + """Returns one boolean vector for each k, whether the target is within the + output's top-k.""" + _, pred = output.topk(max(ks), 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + return [correct[:k].max(0)[0] for k in ks] + + +def run_eval(model, data_loader, step, group_slices, num_group): + # switch to evaluate mode + model.eval() + + all_c, all_top1 = [], [] + + train_dataiter = iter(data_loader) + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Test : ', + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + data = batch['data'].cuda() + group_label = batch['group_label'].cuda() + class_label = batch['class_label'].cuda() + labels = [] + for i in range(len(group_label)): + label = torch.zeros(num_group, dtype=torch.int64) + label[group_label[i]] = class_label[i] + 1 + labels.append(label.unsqueeze(0)) + labels = torch.cat(labels, dim=0).cuda() + + with torch.no_grad(): + x = data + y = labels + + # compute output, measure accuracy and record loss. + logits = model(x) + if group_slices is not None: + c, top1 = calc_group_softmax_acc(logits, y, group_slices) + else: + c = torch.nn.CrossEntropyLoss(reduction='none')(logits, y) + top1 = topk(logits, y, ks=(1, ))[0] + + all_c.extend(c.cpu()) # Also ensures a sync point. + all_top1.extend(top1.cpu()) + + model.train() + # print(f'Validation@{step} loss {np.mean(all_c):.5f}, ' + # f'top1 {np.mean(all_top1):.2%}') + + # writer.add_scalar('Val/loss', np.mean(all_c), step) + # writer.add_scalar('Val/top1', np.mean(all_top1), step) + return all_c, all_top1 + + +class MOSTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net.cuda() + self.train_loader = train_loader + self.config = config + self.lr = config.optimizer.lr + + trainable_params = filter(lambda p: p.requires_grad, net.parameters()) + self.optim = torch.optim.SGD(trainable_params, + lr=self.lr, + momentum=0.9) + self.optim.zero_grad() + self.net.train() + + # train_set len + self.train_set_len = config.dataset.train.batch_size * len( + train_loader) + self.mixup = get_mixup(self.train_set_len) + self.cri = torch.nn.CrossEntropyLoss().cuda() + + self.accum_steps = 0 + self.mixup_l = np.random.beta(self.mixup, + self.mixup) if self.mixup > 0 else 1 + + # if specified group_config + if (config.trainer.group_config.endswith('npy')): + self.classes_per_group = np.load(config.trainer.group_config) + elif (config.trainer.group_config.endswith('txt')): + self.classes_per_group = np.loadtxt(config.trainer.group_config, + dtype=int) + else: + self.cal_group_slices(self.train_loader) + + self.num_group = len(self.classes_per_group) + self.group_slices = get_group_slices(self.classes_per_group) + self.group_slices = self.group_slices.cuda() + + self.step = 0 + self.batch_split = 1 + + def cal_group_slices(self, train_loader): + # cal group config + group = {} + train_dataiter = iter(self.train_loader) + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='cal group_config', + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + group_label = deepcopy(batch['group_label']) + class_label = deepcopy(batch['class_label']) + + for i in range(len(class_label)): + gl = group_label[i].item() + cl = class_label[i].item() + + try: + group[str(gl)] + except: + group[str(gl)] = [] + + if cl 
not in group[str(gl)]: + group[str(gl)].append(cl) + + self.classes_per_group = [] + for i in range(len(group)): + self.classes_per_group.append(max(group[str(i)]) + 1) + + def train_epoch(self, epoch_idx): + total_loss = 0 + + train_dataiter = iter(self.train_loader) + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + data = batch['data'].cuda() + group_label = batch['group_label'].cuda() + class_label = batch['class_label'].cuda() + + labels = [] + for i in range(len(group_label)): + label = torch.zeros(self.num_group, dtype=torch.int64) + label[group_label[i]] = class_label[i] + 1 + labels.append(label.unsqueeze(0)) + labels = torch.cat(labels, dim=0).cuda() + + # Update learning-rate, including stop training if over. + lr = get_lr(self.step, self.train_set_len, self.lr) + if lr is None: + break + for param_group in self.optim.param_groups: + param_group['lr'] = lr + + if self.mixup > 0.0: + x, y_a, y_b = mixup_data(data, labels, self.mixup_l) + + logits = self.net(data) + + y_a = y_a.cuda() + y_b = y_b.cuda() + if self.mixup > 0.0: + c = mixup_criterion_group(self.cri, logits, y_a, y_b, + self.mixup_l, self.group_slices) + else: + c = calc_group_softmax_loss(self.cri, logits, labels, + self.group_slices) + + c_num = float(c.data.cpu().numpy()) # Also ensures a sync point. + # # Accumulate grads + (c / self.batch_split).backward() + self.accum_steps += 1 + + # accstep = f' ({self.accum_steps}/{self.batch_split})' \ + # if self.batch_split > 1 else '' + # print( + # f'[step {self.step}{accstep}]: loss={c_num:.5f} (lr={lr:.1e})') + + total_loss += c_num + + # Update params + # if self.accum_steps == self.batch_split: + self.optim.step() + self.optim.zero_grad() + + self.step += 1 + self.accum_steps = 0 + # Sample new mixup ratio for next batch + self.mixup_l = np.random.beta(self.mixup, + self.mixup) if self.mixup > 0 else 1 + + # torch.save(self.net.state_dict(), + # os.path.join(self.config.output_dir, 'mos_epoch_latest.ckpt')) + + # step, all_top1 = run_eval(self.net, self.train_loader, self.step, self.group_slices, + # self.num_group) + + loss_avg = total_loss / len(train_dataiter) + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg + # metrics['acc'] = np.mean(all_top1) # the acc used in there is the top1 acc + + return self.net, metrics, self.num_group, self.group_slices diff --git a/OpenOOD/openood/trainers/npos_trainer.py b/OpenOOD/openood/trainers/npos_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..3bb9730a439ef60563eff494eab6161628a3dbb1 --- /dev/null +++ b/OpenOOD/openood/trainers/npos_trainer.py @@ -0,0 +1,449 @@ +import faiss.contrib.torch_utils +import math +import time +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.distributions import MultivariateNormal +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + + +class NPOSTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + val_loader: DataLoader, config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + + # a bunch of constants or hyperparams + self.n_cls = config.dataset.num_classes + self.sample_number = config.trainer.trainer_args.sample_number + self.sample_from = config.trainer.trainer_args.sample_from + 
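+        # [editor's note, illustrative summary, not part of the original
+        # patch] NPOS keeps a queue of `sample_number` penultimate features
+        # per class; once filled (and past start_epoch_KNN), it uses the
+        # faiss k-NN index plus standard-Gaussian samples to synthesize
+        # outliers near the ID boundary, which supervise the binary
+        # ID-vs-outlier MLP head via BCEWithLogitsLoss in train_epoch.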
try: + self.penultimate_dim = net.backbone.feature_size + except AttributeError: + self.penultimate_dim = net.backbone.module.feature_size + self.start_epoch_KNN = config.trainer.trainer_args.start_epoch_KNN + self.K = config.trainer.trainer_args.K + self.select = config.trainer.trainer_args.select + self.cov_mat = config.trainer.trainer_args.cov_mat + self.pick_nums = config.trainer.trainer_args.pick_nums + self.w_disp = config.trainer.trainer_args.w_disp + self.w_comp = config.trainer.trainer_args.w_comp + self.loss_weight = config.trainer.trainer_args.loss_weight + self.temp = config.trainer.trainer_args.temp + self.ID_points_num = config.trainer.trainer_args.ID_points_num + + res = faiss.StandardGpuResources() + self.KNN_index = faiss.GpuIndexFlatL2(res, self.penultimate_dim) + + self.number_dict = {} + for i in range(self.n_cls): + self.number_dict[i] = 0 + + if self.config.num_gpus > 1: + params = [{ + 'params': net.module.backbone.parameters() + }, { + 'params': net.module.head.parameters() + }, { + 'params': + net.module.mlp.parameters(), + 'lr': + config.optimizer.lr * config.optimizer.mlp_decay_rate + }] + else: + params = [{ + 'params': net.backbone.parameters() + }, { + 'params': net.head.parameters() + }, { + 'params': + net.mlp.parameters(), + 'lr': + config.optimizer.lr * config.optimizer.mlp_decay_rate + }] + + self.optimizer = torch.optim.SGD( + params, + lr=config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + if config.dataset.train.batch_size \ + * config.num_gpus * config.num_machines > 256: + config.optimizer.warm = True + + if config.optimizer.warm: + self.warmup_from = 0.001 + self.warm_epochs = 10 + if config.optimizer.cosine: + eta_min = config.optimizer.lr * \ + (config.optimizer.lr_decay_rate**3) + self.warmup_to = eta_min + (config.optimizer.lr - eta_min) * ( + 1 + math.cos(math.pi * self.warm_epochs / + config.optimizer.num_epochs)) / 2 + else: + self.warmup_to = config.optimizer.lr + + self.criterion_comp = CompLoss(self.n_cls, + temperature=self.temp).cuda() + # V2: EMA style prototypes + self.criterion_disp = DispLoss(self.n_cls, + config.network.feat_dim, + config.trainer.trainer_args.proto_m, + self.net, + val_loader, + temperature=self.temp).cuda() + + def train_epoch(self, epoch_idx): + adjust_learning_rate(self.config, self.optimizer, epoch_idx - 1) + + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + data_dict = torch.zeros(self.n_cls, self.sample_number, + self.penultimate_dim).cuda() + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + warmup_learning_rate(self.config, self.warm_epochs, + self.warmup_from, + self.warmup_to, epoch_idx - 1, train_step, + len(train_dataiter), self.optimizer) + + batch = next(train_dataiter) + data = batch['data'] + target = batch['label'] + + data = torch.cat([data[0], data[1]], dim=0).cuda() + target = target.repeat(2).cuda() + + # forward + penultimate = self.net.backbone(data) + features = self.net.head(penultimate) + + sum_temp = 0 + for index in range(self.n_cls): + sum_temp += self.number_dict[index] + lr_reg_loss = torch.zeros(1).cuda()[0] + + if sum_temp == self.n_cls * self.sample_number \ + and epoch_idx < self.start_epoch_KNN: + # maintaining an ID data queue for each class. 
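+                # [editor's note] the torch.cat below drops the oldest entry
+                # and appends the newest detached feature, i.e. a per-class
+                # FIFO queue of the most recent `sample_number` features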
+ target_numpy = target.cpu().data.numpy() + for index in range(len(target)): + dict_key = target_numpy[index] + data_dict[dict_key] = torch.cat( + (data_dict[dict_key][1:], + penultimate[index].detach().view(1, -1)), 0) + elif sum_temp == self.n_cls * self.sample_number \ + and epoch_idx >= self.start_epoch_KNN: + target_numpy = target.cpu().data.numpy() + for index in range(len(target)): + dict_key = target_numpy[index] + data_dict[dict_key] = torch.cat( + (data_dict[dict_key][1:], + penultimate[index].detach().view(1, -1)), 0) + # Standard Gaussian distribution + new_dis = MultivariateNormal( + torch.zeros(self.penultimate_dim).cuda(), + torch.eye(self.penultimate_dim).cuda()) + negative_samples = new_dis.rsample((self.sample_from, )) + for index in range(self.n_cls): + ID = data_dict[index] + sample_point = generate_outliers( + ID, + input_index=self.KNN_index, + negative_samples=negative_samples, + ID_points_num=self.ID_points_num, + K=self.K, + select=self.select, + cov_mat=self.cov_mat, + sampling_ratio=1.0, + pic_nums=self.pick_nums, + depth=self.penultimate_dim) + if index == 0: + ood_samples = sample_point + else: + ood_samples = torch.cat((ood_samples, sample_point), 0) + + if len(ood_samples) != 0: + energy_score_for_fg = self.net.mlp(penultimate) + energy_score_for_bg = self.net.mlp(ood_samples) + input_for_lr = torch.cat( + (energy_score_for_fg, energy_score_for_bg), + 0).squeeze() + labels_for_lr = torch.cat( + (torch.ones(len(energy_score_for_fg)).cuda(), + torch.zeros(len(energy_score_for_bg)).cuda()), -1) + criterion_BCE = torch.nn.BCEWithLogitsLoss() + lr_reg_loss = criterion_BCE(input_for_lr.view(-1), + labels_for_lr) + else: + target_numpy = target.cpu().data.numpy() + for index in range(len(target)): + dict_key = target_numpy[index] + if self.number_dict[dict_key] < self.sample_number: + data_dict[dict_key][self.number_dict[ + dict_key]] = penultimate[index].detach() + self.number_dict[dict_key] += 1 + normed_features = F.normalize(features, dim=1) + + disp_loss = self.criterion_disp(normed_features, target) + comp_loss = self.criterion_comp(normed_features, + self.criterion_disp.prototypes, + target) + + loss = self.w_disp * disp_loss + self.w_comp * comp_loss + loss = self.loss_weight * lr_reg_loss + loss + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + # comm.synchronize() + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics + + def save_metrics(self, loss_avg): + all_loss = comm.gather(loss_avg) + total_losses_reduced = np.mean([x for x in all_loss]) + + return total_losses_reduced + + +def adjust_learning_rate(config, optimizer, epoch): + lr = config.optimizer.lr + if config.optimizer.cosine: + eta_min = lr * (config.optimizer.lr_decay_rate**3) + lr = eta_min + (lr - eta_min) * ( + 1 + math.cos(math.pi * epoch / config.optimizer.num_epochs)) / 2 + else: + steps = np.sum(epoch > np.asarray(config.optimizer.lr_decay_epochs)) + if steps > 0: + lr = lr * (config.optimizer.lr_decay_rate**steps) + + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def warmup_learning_rate(config, warm_epochs, warmup_from, warmup_to, epoch, + batch_id, total_batches, optimizer): + if config.optimizer.warm and epoch <= warm_epochs: + p = (batch_id + (epoch - 1) * total_batches) / \ + (warm_epochs * total_batches) + lr = 
warmup_from + p * (warmup_to - warmup_from) + + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +class CompLoss(nn.Module): + def __init__(self, n_cls, temperature=0.07, base_temperature=0.07): + super(CompLoss, self).__init__() + self.n_cls = n_cls + self.temperature = temperature + self.base_temperature = base_temperature + + def forward(self, features, prototypes, labels): + device = torch.device('cuda') + + proxy_labels = torch.arange(0, self.n_cls).to(device) + batch_size = features.shape[0] + labels = labels.contiguous().view(-1, 1) + if labels.shape[0] != batch_size: + raise ValueError('Num of labels does not match num of features') + mask = torch.eq(labels, proxy_labels.T).float().to(device) + + # compute logits + anchor_feature = features + contrast_feature = prototypes / prototypes.norm(dim=-1, keepdim=True) + anchor_dot_contrast = torch.div( + torch.matmul(anchor_feature, contrast_feature.T), self.temperature) + # for numerical stability + logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True) + logits = anchor_dot_contrast - logits_max.detach() + + # compute log_prob + exp_logits = torch.exp(logits) + log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True)) + # compute mean of log-likelihood over positive + mean_log_prob_pos = (mask * log_prob).sum(1) + loss = -(self.temperature / + self.base_temperature) * mean_log_prob_pos.mean() + return loss + + +class DispLoss(nn.Module): + def __init__(self, + n_cls, + feat_dim, + proto_m, + model, + loader, + temperature=0.1, + base_temperature=0.1): + super(DispLoss, self).__init__() + self.n_cls = n_cls + self.feat_dim = feat_dim + self.proto_m = proto_m + self.temperature = temperature + self.base_temperature = base_temperature + self.register_buffer('prototypes', + torch.zeros(self.n_cls, self.feat_dim)) + self.model = model + self.loader = loader + self.init_class_prototypes() + + def forward(self, features, labels): + prototypes = self.prototypes + num_cls = self.n_cls + for j in range(len(features)): + prototypes[labels[j].item()] = F.normalize( + prototypes[labels[j].item()] * self.proto_m + features[j] * + (1 - self.proto_m), + dim=0) + self.prototypes = prototypes.detach() + labels = torch.arange(0, num_cls).cuda() + labels = labels.contiguous().view(-1, 1) + + mask = (1 - torch.eq(labels, labels.T).float()).cuda() + + logits = torch.div(torch.matmul(prototypes, prototypes.T), + self.temperature) + + logits_mask = torch.scatter(torch.ones_like(mask), 1, + torch.arange(num_cls).view(-1, 1).cuda(), + 0) + mask = mask * logits_mask + mean_prob_neg = torch.log( + (mask * torch.exp(logits)).sum(1) / mask.sum(1)) + mean_prob_neg = mean_prob_neg[~torch.isnan(mean_prob_neg)] + loss = self.temperature / self.base_temperature * mean_prob_neg.mean() + return loss + + def init_class_prototypes(self): + """Initialize class prototypes.""" + self.model.eval() + start = time.time() + prototype_counts = [0] * self.n_cls + with torch.no_grad(): + prototypes = torch.zeros(self.n_cls, self.feat_dim).cuda() + for i, batch in enumerate(self.loader): + input = batch['data'] + target = batch['label'] + input, target = input.cuda(), target.cuda() + features = self.model(input) + for j, feature in enumerate(features): + prototypes[target[j].item()] += feature + prototype_counts[target[j].item()] += 1 + for cls in range(self.n_cls): + prototypes[cls] /= prototype_counts[cls] + # measure elapsed time + duration = time.time() - start + print(f'Time to initialize prototypes: {duration:.3f}') + prototypes = 
F.normalize(prototypes, dim=1) + self.prototypes = prototypes + + +def generate_outliers(ID, + input_index, + negative_samples, + ID_points_num=2, + K=20, + select=1, + cov_mat=0.1, + sampling_ratio=1.0, + pic_nums=30, + depth=342): + length = negative_samples.shape[0] + data_norm = torch.norm(ID, p=2, dim=1, keepdim=True) + normed_data = ID / data_norm + rand_ind = np.random.choice(normed_data.shape[0], + int(normed_data.shape[0] * sampling_ratio), + replace=False) + index = input_index + index.add(normed_data[rand_ind]) + minD_idx, k_th = KNN_dis_search_decrease(ID, index, K, select) + minD_idx = minD_idx[np.random.choice(select, int(pic_nums), replace=False)] + data_point_list = torch.cat( + [ID[i:i + 1].repeat(length, 1) for i in minD_idx]) + negative_sample_cov = cov_mat * negative_samples.cuda().repeat(pic_nums, 1) + negative_sample_list = negative_sample_cov + data_point_list + point = KNN_dis_search_distance(negative_sample_list, index, K, + ID_points_num, length, depth) + + index.reset() + return point + + +def KNN_dis_search_distance(target, + index, + K=50, + num_points=10, + length=2000, + depth=342): + ''' + data_point: Queue for searching k-th points + target: the target of the search + K + ''' + # Normalize the features + target_norm = torch.norm(target, p=2, dim=1, keepdim=True) + normed_target = target / target_norm + + distance, output_index = index.search(normed_target, K) + k_th_distance = distance[:, -1] + k_th = k_th_distance.view(length, -1) + # target_new = target.view(length, -1, depth) + k_th_distance, minD_idx = torch.topk(k_th, num_points, dim=0) + minD_idx = minD_idx.squeeze() + point_list = [] + for i in range(minD_idx.shape[1]): + point_list.append(i * length + minD_idx[:, i]) + return target[torch.cat(point_list)] + + +def KNN_dis_search_decrease( + target, + index, + K=50, + select=1, +): + ''' + data_point: Queue for searching k-th points + target: the target of the search + K + ''' + # Normalize the features + target_norm = torch.norm(target, p=2, dim=1, keepdim=True) + normed_target = target / target_norm + + distance, output_index = index.search(normed_target, K) + k_th_distance = distance[:, -1] + k_th_distance, minD_idx = torch.topk(k_th_distance, select) + return minD_idx, k_th_distance diff --git a/OpenOOD/openood/trainers/oe_trainer.py b/OpenOOD/openood/trainers/oe_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..6e0c4db1139da06acd6da72c90e82a3b02ebf8e8 --- /dev/null +++ b/OpenOOD/openood/trainers/oe_trainer.py @@ -0,0 +1,75 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .base_trainer import BaseTrainer + + +class OETrainer(BaseTrainer): + def __init__( + self, + net: nn.Module, + train_loader: DataLoader, + train_unlabeled_loader: DataLoader, + config: Config, + ) -> None: + super().__init__(net, train_loader, config) + self.train_unlabeled_loader = train_unlabeled_loader + self.lambda_oe = config.trainer.lambda_oe + + def train_epoch(self, epoch_idx): + self.net.train() # enter train mode + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + if self.train_unlabeled_loader: + unlabeled_dataiter = iter(self.train_unlabeled_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + + try: 
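The outlier synthesis above works in two stages: `KNN_dis_search_decrease` picks the `select` ID features with the largest k-NN distance (boundary points), and `KNN_dis_search_distance` keeps, among Gaussian perturbations of those points, the ones farthest from the ID set. An illustrative call with made-up shapes (this, like the trainer itself, assumes a GPU build of faiss):

```python
import faiss
import faiss.contrib.torch_utils  # lets faiss indexes accept torch tensors
import torch
from torch.distributions import MultivariateNormal

dim = 342
res = faiss.StandardGpuResources()
index = faiss.GpuIndexFlatL2(res, dim)
ID = torch.randn(600, dim).cuda()  # queued features of one class
noise = MultivariateNormal(torch.zeros(dim).cuda(),
                           torch.eye(dim).cuda()).rsample((300, ))
outliers = generate_outliers(ID, input_index=index, negative_samples=noise,
                             ID_points_num=2, K=20, select=50, cov_mat=0.1,
                             sampling_ratio=1.0, pic_nums=30, depth=dim)
# -> pic_nums * ID_points_num = 60 synthetic boundary outliers
```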
+ unlabeled_batch = next(unlabeled_dataiter) + except StopIteration: + unlabeled_dataiter = iter(self.train_unlabeled_loader) + unlabeled_batch = next(unlabeled_dataiter) + + data = torch.cat((batch['data'], unlabeled_batch['data'])).cuda() + batch_size = batch['data'].size(0) + + # forward + logits_classifier = self.net(data) + loss = F.cross_entropy(logits_classifier[:batch_size], + batch['label'].cuda()) + + loss_oe = -( + logits_classifier[batch_size:].mean(1) - + torch.logsumexp(logits_classifier[batch_size:], dim=1)).mean() + loss += self.lambda_oe * loss_oe + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics diff --git a/OpenOOD/openood/trainers/opengan_trainer.py b/OpenOOD/openood/trainers/opengan_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..22554b231f9096197d9ae39399b4d8b86402a5bd --- /dev/null +++ b/OpenOOD/openood/trainers/opengan_trainer.py @@ -0,0 +1,131 @@ +import random + +import torch +import torch.nn as nn +import torch.optim as optim +from tqdm import tqdm + +import openood.utils.comm as comm + + +def weights_init(m): + classname = m.__class__.__name__ + if classname.find('Conv') != -1: + nn.init.normal_(m.weight.data, 0.0, 0.02) + elif classname.find('BatchNorm') != -1: + nn.init.normal_(m.weight.data, 1.0, 0.02) + nn.init.constant_(m.bias.data, 0) + + +class OpenGanTrainer: + def __init__(self, net, feat_loader, config) -> None: + + manualSeed = 999 + print('Random Seed: ', manualSeed) + random.seed(manualSeed) + torch.manual_seed(manualSeed) + + self.config = config + self.netG = net['netG'] + self.netD = net['netD'] + self.netG.apply(weights_init) + self.netD.apply(weights_init) + self.feat_loader = feat_loader + + self.nz = self.config.network.nz + + self.real_label = 1 + self.fake_label = 0 + + optimizer_config = self.config.optimizer + self.optimizerD = optim.Adam(self.netD.parameters(), + lr=optimizer_config.lr / 1.5, + betas=(optimizer_config.beta1, 0.999)) + self.optimizerG = optim.Adam(self.netG.parameters(), + lr=optimizer_config.lr, + betas=(optimizer_config.beta1, 0.999)) + + self.criterion = nn.BCELoss() + + self.G_losses = [] + self.D_losses = [] + + def train_epoch(self, epoch_idx): + + feat_dataiter = iter(self.feat_loader) + + for train_step in tqdm(range(1, + len(feat_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + data = next(feat_dataiter)['data'] + ############################ + # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z))) + ########################### + # Train with all-real batch + self.netD.zero_grad() + # Format batch + loaded_data = data.cuda() + b_size = loaded_data.size(0) + label = torch.full((b_size, ), self.real_label).cuda() + label = label.to(torch.float32) + + # Forward pass real batch through D + output = self.netD(loaded_data).view(-1) + # import pdb + # pdb.set_trace() + # Calculate loss on all-real batch + errD_real = self.criterion(output, label) + # Calculate gradients for D in backward pass + errD_real.backward() + D_x = output.mean().item() + + # Train with all-fake batch + # Generate batch of latent vectors + noise = torch.randn(b_size, self.nz, 1, 1).cuda() + # Generate fake image batch with 
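Stepping back to the OE objective above: for logits z, mean(z) - logsumexp(z) is the per-sample average of log-softmax(z), so minimizing `loss_oe` drives predictions on the auxiliary outliers toward the uniform distribution. A quick numerical check of that identity:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)
loss_oe = -(logits.mean(1) - torch.logsumexp(logits, dim=1)).mean()
uniform_ce = -F.log_softmax(logits, dim=1).mean(1).mean()  # CE vs. uniform target
assert torch.allclose(loss_oe, uniform_ce)
```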
G + fake = self.netG(noise) + label.fill_(self.fake_label) + # Classify all fake batch with D + output = self.netD(fake.detach()).view(-1) + # Calculate D's loss on the all-fake batch + errD_fake = self.criterion(output, label) + # Calculate the gradients for this batch + errD_fake.backward() + D_G_z1 = output.mean().item() + # Add the gradients from the all-real and all-fake batches + errD = errD_real + errD_fake + # Update D + self.optimizerD.step() + + ############################ + # (2) Update G network: maximize log(D(G(z))) + ########################### + self.netG.zero_grad() + label.fill_( + self.real_label) # fake labels are real for generator cost + # Since we just updated D, + # perform another forward pass of all-fake batch through D + output = self.netD(fake).view(-1) + # Calculate G's loss based on this output + errG = self.criterion(output, label) + # Calculate gradients for G + errG.backward() + D_G_z2 = output.mean().item() + # Update G + self.optimizerG.step() + + # Save Losses for plotting later, if needed + self.G_losses.append(errG.item()) + self.D_losses.append(errD.item()) + + return { + 'netG': self.netG, + 'netD': self.netD + }, { + 'G_losses': self.G_losses, + 'D_losses': self.D_losses, + 'epoch_idx': epoch_idx + } diff --git a/OpenOOD/openood/trainers/rd4ad_trainer.py b/OpenOOD/openood/trainers/rd4ad_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..c16f80ed124e79f4c1f54a3a18e1c202ed6cfe97 --- /dev/null +++ b/OpenOOD/openood/trainers/rd4ad_trainer.py @@ -0,0 +1,53 @@ +import torch + +from torchvision.datasets import ImageFolder +import numpy as np +import random +import os +from torch.utils.data import DataLoader +import torch.backends.cudnn as cudnn +import argparse +from torch.nn import functional as F +from tqdm import tqdm +from openood.utils import Config +from openood.losses.rd4ad_loss import loss_function + +class Rd4adTrainer: + def __init__(self, net, train_loader, config: Config): + self.config = config + self.train_loader = train_loader + self.encoder = net['encoder'] + self.bn = net['bn'] + self.decoder = net['decoder'] + if config.optimizer.name == 'adam': + self.optimizer=torch.optim.Adam(list(self.decoder.parameters())+list(self.bn.parameters()), lr=config.optimizer.lr, betas=config.optimizer.betas) + + def train_epoch(self, epoch_idx): + self.encoder.eval() + self.bn.train() + self.decoder.train() + train_dataiter = iter(self.train_loader) + epoch_loss = 0 + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d} '.format(epoch_idx), + position=0, + leave=True): + batch = next(train_dataiter) + img = batch['data'].cuda() + feature_list = self.encoder.forward(img,return_feature_list=True)[1] + inputs = feature_list[1:4] + outputs = self.decoder(self.bn(inputs)) + loss = loss_function(inputs, outputs) + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + epoch_loss += loss.item() + metrics = {} + net = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = epoch_loss + net['encoder'] = self.encoder + net['bn'] = self.bn + net['decoder'] = self.decoder + return net, metrics diff --git a/OpenOOD/openood/trainers/regmixup_trainer.py b/OpenOOD/openood/trainers/regmixup_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..79dce7c4749f9b4f05ad1e89742815e30f3c0813 --- /dev/null +++ b/OpenOOD/openood/trainers/regmixup_trainer.py @@ -0,0 +1,100 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from 
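`Rd4adTrainer` above delegates its objective to `loss_function` from `openood/losses/rd4ad_loss.py`, which this diff does not include. In the RD4AD paper the loss is a cosine-similarity distance between corresponding encoder (teacher) and decoder (student) feature maps, so a sketch under that assumption would look like:

```python
import torch
import torch.nn.functional as F

def rd4ad_loss_sketch(encoder_feats, decoder_feats):
    # Assumed form, mirroring the RD4AD paper; the repo's loss_function
    # is authoritative. Each pair of feature maps is flattened per sample.
    loss = 0.0
    for a, b in zip(encoder_feats, decoder_feats):
        loss += torch.mean(1 - F.cosine_similarity(
            a.flatten(start_dim=1), b.flatten(start_dim=1), dim=1))
    return loss
```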
torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config +from .lr_scheduler import cosine_annealing + + +# https://github.com/FrancescoPinto/RegMixup/blob/main/models/regmixup.py +def mixup_data(x, y, alpha=1.0): + """Returns mixed inputs, pairs of targets, and lambda.""" + + if alpha > 0: + lam = np.random.beta(alpha, alpha) + else: + lam = 1 + + batch_size = x.size()[0] + index = torch.randperm(batch_size).cuda() + mixed_x = lam * x + (1 - lam) * x[index] + y_a, y_b = y, y[index] + return mixed_x, y_a, y_b, lam + + +def regmixup_criterion(criterion, pred, y_a, y_b, lam): + return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) + + +class RegMixupTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + self.alpha = self.config.trainer.trainer_args.alpha + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + x, y = batch['data'].cuda(), batch['label'].cuda() + + # mixup operation + mixup_x, part_y_a, part_y_b, lam = mixup_data(x, y, self.alpha) + targets_a = torch.cat([y, part_y_a]) + targets_b = torch.cat([y, part_y_b]) + x = torch.cat([x, mixup_x], dim=0) + + # forward + logits = self.net(x) + loss = regmixup_criterion(F.cross_entropy, logits, targets_a, + targets_b, lam) + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg + + return self.net, metrics diff --git a/OpenOOD/openood/trainers/rotpred_trainer.py b/OpenOOD/openood/trainers/rotpred_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..74627019fae782121b89fc65c9a069b621a71219 --- /dev/null +++ b/OpenOOD/openood/trainers/rotpred_trainer.py @@ -0,0 +1,97 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + +class RotPredTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 
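A detail worth noting in the RegMixup trainer above: the clean batch stays in the objective. For the first half of the concatenated batch, `targets_a` and `targets_b` are both the original `y`, and lam*CE + (1-lam)*CE collapses to plain cross-entropy, so only the appended mixup half sees interpolated targets. A toy check of the collapse:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(8, 10)
y = torch.randint(0, 10, (8, ))
lam = 0.3
combined = lam * F.cross_entropy(logits, y) \
    + (1 - lam) * F.cross_entropy(logits, y)
assert torch.allclose(combined, F.cross_entropy(logits, y))
```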
1e-6 / config.optimizer.lr, + ), + ) + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + + batch_size = len(data) + x_90 = torch.rot90(data, 1, [2, 3]) + x_180 = torch.rot90(data, 2, [2, 3]) + x_270 = torch.rot90(data, 3, [2, 3]) + + x_rot = torch.cat([data, x_90, x_180, x_270]) + y_rot = torch.cat([ + torch.zeros(batch_size), + torch.ones(batch_size), + 2 * torch.ones(batch_size), + 3 * torch.ones(batch_size), + ]).long().cuda() + + # forward + logits, logits_rot = self.net(x_rot, return_rot_logits=True) + loss_cls = F.cross_entropy(logits[:batch_size], target) + loss_rot = F.cross_entropy(logits_rot, y_rot) + loss = loss_cls + loss_rot + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + # comm.synchronize() + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = self.save_metrics(loss_avg) + + return self.net, metrics + + def save_metrics(self, loss_avg): + all_loss = comm.gather(loss_avg) + total_losses_reduced = np.mean([x for x in all_loss]) + + return total_losses_reduced diff --git a/OpenOOD/openood/trainers/rts_trainer.py b/OpenOOD/openood/trainers/rts_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..2df5ecb1959c13129545956987a73d5f093c8f2a --- /dev/null +++ b/OpenOOD/openood/trainers/rts_trainer.py @@ -0,0 +1,79 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +from openood.utils import Config + +from .lr_scheduler import cosine_annealing + + + +class RTSTrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True): + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + + # forward + logits_classifier, variance = self.net(data, return_var=True) + epsilon = torch.randn_like(variance) + temperature = torch.sum(variance * epsilon * epsilon, dim=1, keepdim=True) / (self.config.network.dof - 2) + loss_kl = ((variance - torch.log(variance + 1e-8) - 1) * 0.5).mean() + loss_head = F.cross_entropy(logits_classifier / temperature, target) + loss = loss_head + self.config.network.kl_scale * loss_kl + + # backward + self.optimizer.zero_grad() + loss.backward() + nn.utils.clip_grad_norm_(parameters=self.net.parameters(), max_norm=2.5, 
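The KL term in the RTS loss above is the closed-form KL divergence between N(0, v) and N(0, 1) applied per variance dimension, KL = (v - log v - 1) / 2: it is non-negative and zero exactly at v = 1, so it pulls the predicted variances toward 1 while the sampled temperature rescales the logits. A small check:

```python
import torch

v = torch.tensor([0.5, 1.0, 2.0])
kl = (v - torch.log(v) - 1) * 0.5  # KL(N(0, v) || N(0, 1)) per dimension
print(kl)  # tensor([0.0966, 0.0000, 0.1534]) -- minimum at v = 1
```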
norm_type=2) + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg + + return self.net, metrics diff --git a/OpenOOD/openood/trainers/sae_trainer.py b/OpenOOD/openood/trainers/sae_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..aa3060217290e2aaf3256204b7f4a0230c1c7019 --- /dev/null +++ b/OpenOOD/openood/trainers/sae_trainer.py @@ -0,0 +1,145 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from sklearn.mixture import GaussianMixture +from torch.utils.data import DataLoader +from tqdm import tqdm + +from openood.losses import soft_cross_entropy +from openood.postprocessors.gmm_postprocessor import compute_single_GMM_score +from openood.postprocessors.mds_ensemble_postprocessor import ( + process_feature_type, reduce_feature_dim, tensor2list) +from openood.utils import Config + +from .lr_scheduler import cosine_annealing +from .mixup_trainer import mixing, prepare_mixup + + +class SAETrainer: + def __init__(self, net: nn.Module, train_loader: DataLoader, + config: Config) -> None: + + self.net = net + self.train_loader = train_loader + self.config = config + self.trainer_args = self.config.trainer.trainer_args + + self.optimizer = torch.optim.SGD( + net.parameters(), + config.optimizer.lr, + momentum=config.optimizer.momentum, + weight_decay=config.optimizer.weight_decay, + nesterov=True, + ) + + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, + config.optimizer.num_epochs * len(train_loader), + 1, + 1e-6 / config.optimizer.lr, + ), + ) + + @torch.no_grad() + def setup(self): + feature_all = None + label_all = [] + # collect features + for batch in tqdm(self.train_loader, + desc='Compute GMM Stats [Collecting]'): + data = batch['data_aux'].cuda() + label = batch['label'] + _, feature_list = self.net(data, return_feature_list=True) + label_all.extend(tensor2list(label)) + feature_processed = process_feature_type( + feature_list[0], self.trainer_args.feature_type) + if isinstance(feature_all, type(None)): + feature_all = tensor2list(feature_processed) + else: + feature_all.extend(tensor2list(feature_processed)) + label_all = np.array(label_all) + + # reduce feature dim and perform gmm estimation + feature_all = np.array(feature_all) + transform_matrix = reduce_feature_dim(feature_all, label_all, + self.trainer_args.reduce_dim) + feature_all = np.dot(feature_all, transform_matrix) + # GMM estimation + gm = GaussianMixture(n_components=self.trainer_args.num_clusters, + random_state=0, + covariance_type='tied').fit(feature_all) + feature_mean = gm.means_ + feature_prec = gm.precisions_ + component_weight = gm.weights_ + + self.feature_mean = torch.Tensor(feature_mean).cuda() + self.feature_prec = torch.Tensor(feature_prec).cuda() + self.component_weight = torch.Tensor(component_weight).cuda() + self.transform_matrix = torch.Tensor(transform_matrix).cuda() + + def train_epoch(self, epoch_idx): + self.net.train() + + loss_avg = 0.0 + train_dataiter = iter(self.train_loader) + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True): + batch = next(train_dataiter) + data = batch['data'].cuda() + target = batch['label'].cuda() + + # mixup operation + index, lam = 
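`setup()` above must run once before `train_epoch` so that the GMM statistics exist. In isolation, the tied-covariance fit it performs looks like this (toy features; with covariance_type='tied', sklearn returns a single shared precision matrix):

```python
import numpy as np
from sklearn.mixture import GaussianMixture

feats = np.random.randn(1000, 64).astype(np.float32)  # toy reduced features
gm = GaussianMixture(n_components=5, random_state=0,
                     covariance_type='tied').fit(feats)
print(gm.means_.shape, gm.precisions_.shape, gm.weights_.shape)
# (5, 64) (64, 64) (5,)
```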
prepare_mixup(batch, self.trainer_args.alpha) + data_mix = mixing(batch['data'].cuda(), index, lam) + soft_label_mix = mixing(batch['soft_label'].cuda(), index, lam) + + # classfication loss + logits_cls = self.net(data) + loss_clsstd = F.cross_entropy(logits_cls, target) # standard cls + logits_mix = self.net(data_mix) + loss_clsmix = soft_cross_entropy(logits_mix, soft_label_mix) + + # source awareness enhancement + prob_id = compute_single_GMM_score(self.net, data, + self.feature_mean, + self.feature_prec, + self.component_weight, + self.transform_matrix, 0, + self.trainer_args.feature_type) + prob_ood = compute_single_GMM_score(self.net, data_mix, + self.feature_mean, + self.feature_prec, + self.component_weight, + self.transform_matrix, 0, + self.trainer_args.feature_type) + loss_sae_id = 1 - torch.mean(prob_id) + loss_sae_ood = torch.mean(prob_ood) + + # loss + loss = self.trainer_args.loss_weight[0] * loss_clsstd \ + + self.trainer_args.loss_weight[1] * loss_clsmix \ + + self.trainer_args.loss_weight[2] * loss_sae_id \ + + self.trainer_args.loss_weight[3] * loss_sae_ood + + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + # exponential moving average, show smooth values + with torch.no_grad(): + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['loss'] = loss_avg + + return self.net, metrics diff --git a/OpenOOD/openood/trainers/udg_trainer.py b/OpenOOD/openood/trainers/udg_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..8967df52590099a0eb3c0b74d3334bcc0c72633f --- /dev/null +++ b/OpenOOD/openood/trainers/udg_trainer.py @@ -0,0 +1,382 @@ +import time + +import faiss +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + +from ..losses import rew_ce, rew_sce +from .base_trainer import BaseTrainer + + +class UDGTrainer(BaseTrainer): + def __init__( + self, + net: nn.Module, + train_loader: DataLoader, + train_unlabeled_loader: DataLoader, + config: Config, + ) -> None: + super().__init__(net, train_loader, config) + + self.train_unlabeled_loader = train_unlabeled_loader + + self.num_clusters = config.trainer.num_clusters + self.purity_ind_thresh = config.trainer.purity_ind_thresh + self.purity_ood_thresh = config.trainer.purity_ood_thresh + self.oe_enhance_ratio = config.trainer.oe_enhance_ratio + self.lambda_oe = config.trainer.lambda_oe + self.lambda_aux = config.trainer.lambda_aux + + # Init clustering algorithm + self.k_means = KMeans(k=config.trainer.num_clusters, + pca_dim=config.trainer.pca_dim) + + def train_epoch(self, epoch_idx): + self._run_clustering(epoch_idx) + metrics = self._compute_loss(epoch_idx) + + return self.net, metrics + + def _compute_loss(self, epoch_idx): + self.net.train() # enter train mode + + loss_avg, loss_cls_avg, loss_oe_avg, loss_aux_avg = 0.0, 0.0, 0.0, 0.0 + train_dataiter = iter(self.train_loader) + unlabeled_dataiter = iter(self.train_unlabeled_loader) + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + try: + unlabeled_batch = next(unlabeled_dataiter) + except StopIteration: + unlabeled_dataiter = iter(self.train_unlabeled_loader) + unlabeled_batch = next(unlabeled_dataiter) + 
data = batch['data'].cuda() + unlabeled_data = unlabeled_batch['data'].cuda() + + # concat labeled and unlabeled data + logits_cls, logits_aux = self.net(data, return_aux=True) + logits_oe_cls, logits_oe_aux = self.net(unlabeled_data, + return_aux=True) + + # classification loss + concat_logits_cls = torch.cat([logits_cls, logits_oe_cls]) + concat_label = torch.cat([ + batch['label'], + unlabeled_batch['pseudo_label'].type_as(batch['label']), + ]) + loss_cls = F.cross_entropy( + concat_logits_cls[concat_label != -1], + concat_label[concat_label != -1].cuda(), + ) + # oe loss + concat_softlabel = torch.cat( + [batch['soft_label'], unlabeled_batch['pseudo_softlabel']]) + concat_conf = torch.cat( + [batch['ood_conf'], unlabeled_batch['ood_conf']]) + loss_oe = rew_sce( + concat_logits_cls[concat_label == -1], + concat_softlabel[concat_label == -1].cuda(), + concat_conf[concat_label == -1].cuda(), + ) + # aux loss + concat_logits_aux = torch.cat([logits_aux, logits_oe_aux]) + concat_cluster_id = torch.cat( + [batch['cluster_id'], unlabeled_batch['cluster_id']]) + concat_cluster_reweight = torch.cat([ + batch['cluster_reweight'], unlabeled_batch['cluster_reweight'] + ]) + loss_aux = rew_ce( + concat_logits_aux, + concat_cluster_id.cuda(), + concat_cluster_reweight.cuda(), + ) + + # loss addition + loss = loss_cls + self.lambda_oe * loss_oe \ + + self.lambda_aux * loss_aux + # backward + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + self.scheduler.step() + + with torch.no_grad(): + # exponential moving average, show smooth values + loss_cls_avg = loss_cls_avg * 0.8 + float(loss_cls) * 0.2 + loss_oe_avg = loss_oe_avg * 0.8 + float( + self.lambda_oe * loss_oe) * 0.2 + loss_aux_avg = (loss_aux_avg * 0.8 + + float(self.lambda_aux * loss_aux) * 0.2) + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['epoch_idx'] = epoch_idx + metrics['train_cls_loss'] = loss_cls_avg + metrics['train_oe_loss'] = loss_oe_avg + metrics['train_aux_loss'] = loss_aux_avg + metrics['loss'] = loss_avg + + return metrics + + def _run_clustering(self, epoch_idx): + self.net.eval() + + start_time = time.time() + # get data from train loader + print('Clustering: starting gather training features...', flush=True) + # gather train image feature + train_idx_list, unlabeled_idx_list, feature_list, train_label_list = ( + [], + [], + [], + [], + ) + train_dataiter = iter(self.train_loader) + for step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d} ID Clustering: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + index = batch['index'] + label = batch['label'] + # we use no augmented image for clustering + data = batch['data_aux'].cuda() + _, feature = self.net(data, return_feature=True) + feature = feature.detach() + # evaluation + for idx in range(len(data)): + train_idx_list.append(index[idx].tolist()) + train_label_list.append(label[idx].tolist()) + feature_list.append(feature[idx].cpu().tolist()) + num_train_data = len(feature_list) + train_idx_list = np.array(train_idx_list, dtype=int) + train_label_list = np.array(train_label_list, dtype=int) + train_label_list = sort_array(train_label_list, train_idx_list) + # in-distribution samples always have pseudo labels == actual labels + self.train_loader.dataset.pseudo_label = train_label_list + + torch.cuda.empty_cache() + + # gather unlabeled image feature in order + unlabeled_conf_list, unlabeled_pseudo_list = [], [] + unlabeled_dataiter = 
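`rew_ce` and `rew_sce` used above come from `openood/losses` and are not part of this diff; judging from the call sites, `rew_ce` is a per-sample reweighted cross-entropy, plausibly of this form (a sketch, not the repo's implementation):

```python
import torch
import torch.nn.functional as F

def rew_ce_sketch(logits, targets, sample_weights):
    # Per-sample CE, scaled by the cluster-derived weights, then averaged.
    per_sample = F.cross_entropy(logits, targets, reduction='none')
    return (per_sample * sample_weights).mean()
```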
iter(self.train_unlabeled_loader) + for step in tqdm(range(1, + len(unlabeled_dataiter) + 1), + desc='Epoch {:03d} OE Clustering: '.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(unlabeled_dataiter) + index = batch['index'] + # we use no augmented image for clustering + data = batch['data_aux'].cuda() + logit, feature = self.net(data, return_feature=True) + feature = feature.detach() + logit = logit.detach() + score = torch.softmax(logit, dim=1) + conf, pseudo = torch.max(score, dim=1) + # evaluation + for idx in range(len(data)): + unlabeled_idx_list.append(index[idx].tolist()) + feature_list.append(feature[idx].cpu().tolist()) + unlabeled_conf_list.append(conf[idx].cpu().tolist()) + unlabeled_pseudo_list.append(pseudo[idx].cpu().tolist()) + feature_list = np.array(feature_list) + unlabeled_idx_list = np.array(unlabeled_idx_list, dtype=int) + unlabeled_conf_list = np.array(unlabeled_conf_list) + unlabeled_pseudo_list = np.array(unlabeled_pseudo_list) + unlabeled_conf_list = sort_array(unlabeled_conf_list, + unlabeled_idx_list) + unlabeled_pseudo_list = sort_array(unlabeled_pseudo_list, + unlabeled_idx_list) + torch.cuda.empty_cache() + + print('\nAssigning Cluster Labels...', flush=True) + cluster_id = self.k_means.cluster(feature_list) + train_cluster_id = cluster_id[:num_train_data] + unlabeled_cluster_id = cluster_id[num_train_data:] + # assign cluster id to samples. Sorted by shuffle-recording index. + train_cluster_id = sort_array(train_cluster_id, train_idx_list) + unlabeled_cluster_id = sort_array(unlabeled_cluster_id, + unlabeled_idx_list) + self.train_loader.dataset.cluster_id = train_cluster_id + self.train_unlabeled_loader.dataset.cluster_id = unlabeled_cluster_id + cluster_id = np.concatenate([train_cluster_id, unlabeled_cluster_id]) + # reweighting based on samples in clusters + cluster_stat = np.zeros(self.num_clusters) + cluster_id_list, cluster_id_counts = np.unique(cluster_id, + return_counts=True) + for cluster_idx, counts in zip(cluster_id_list, cluster_id_counts): + cluster_stat[cluster_idx] = counts + inv_class_freq = 1 / (cluster_stat + 1e-10) + sample_weight = np.power(inv_class_freq, 0.5) + sample_weight *= 1 / sample_weight.mean() + sample_weight_list = np.array([sample_weight[i] for i in cluster_id]) + self.train_loader.dataset.cluster_reweight \ + = sample_weight_list[:num_train_data] + self.train_unlabeled_loader.dataset.cluster_reweight \ + = sample_weight_list[num_train_data:] + + print('In-Distribution Filtering (with OOD Enhancement)...', + flush=True) + old_train_pseudo_label \ + = self.train_loader.dataset.pseudo_label + old_unlabeled_pseudo_label \ + = self.train_unlabeled_loader.dataset.pseudo_label + old_pseudo_label = np.append(old_train_pseudo_label, + old_unlabeled_pseudo_label).astype(int) + new_pseudo_label = (-1 * np.ones_like(old_pseudo_label)).astype(int) + # process ood confidence for oe loss enhancement (ole) + new_ood_conf = np.ones_like(old_pseudo_label).astype(float) + + total_num_to_filter = 0 + purity_ind_thresh = self.purity_ind_thresh + purity_ood_thresh = self.purity_ood_thresh + # pick out clusters with purity over threshold + for cluster_idx in range(self.num_clusters): + label_in_cluster, label_counts = np.unique( + old_pseudo_label[cluster_id == cluster_idx], + return_counts=True) + cluster_size = len(old_pseudo_label[cluster_id == cluster_idx]) + purity = label_counts / cluster_size # purity list for each label + # idf + if np.any(purity > purity_ind_thresh): + majority_label 
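The reweighting above gives every cluster a weight proportional to 1/sqrt(cluster size), normalized so the weights average to 1: small clusters are up-weighted without letting tiny ones dominate. Numerically:

```python
import numpy as np

cluster_sizes = np.array([100.0, 25.0, 4.0])
w = np.power(1.0 / (cluster_sizes + 1e-10), 0.5)  # [0.1, 0.2, 0.5]
w *= 1.0 / w.mean()                               # normalize to mean 1
print(w)  # ~[0.375, 0.75, 1.875]
```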
= label_in_cluster[purity > purity_ind_thresh][ + 0] # first element in the list + new_pseudo_label[cluster_id == cluster_idx] = majority_label + # this might also change some ID but nvm + if majority_label > 0: # ID cluster + num_to_filter = len(label_in_cluster == -1) + total_num_to_filter += num_to_filter + # ole + elif np.any(purity > purity_ood_thresh): + majority_label = label_in_cluster[ + purity > purity_ood_thresh][0] + if majority_label == -1: + new_ood_conf[cluster_id == + cluster_idx] = self.oe_enhance_ratio + print(f'{total_num_to_filter} sample(s) filtered!', flush=True) + + self.train_unlabeled_loader.dataset.pseudo_label = new_pseudo_label[ + num_train_data:] + self.train_unlabeled_loader.dataset.ood_conf = new_ood_conf[ + num_train_data:] + + print('Randomize Auxiliary Head...', flush=True) + if hasattr(self.net, 'fc_aux'): + # reset auxiliary branch + self.net.fc_aux.weight.data.normal_(mean=0.0, std=0.01) + self.net.fc_aux.bias.data.zero_() + else: + # reset fc for unsupervised learning (baseline) + self.net.fc.weight.data.normal_(mean=0.0, std=0.01) + self.net.fc.bias.data.zero_() + + print( + '# Online Clustering Completed! Duration: {:.2f}s #'.format( + time.time() - start_time), + flush=True, + ) + + +def preprocess_features(npdata, pca=256): + """Preprocess an array of features. + Args: + npdata (np.array N * ndim): features to preprocess + pca (int): dim of output + Returns: + np.array of dim N * pca: data PCA-reduced, whitened and L2-normalized + """ + _, ndim = npdata.shape + npdata = npdata.astype('float32') + + # Apply PCA-whitening with Faiss + mat = faiss.PCAMatrix(ndim, pca, eigen_power=-0.5) + mat.train(npdata) + assert mat.is_trained + npdata = mat.apply_py(npdata) + + # L2 normalization + row_sums = np.linalg.norm(npdata, axis=1) + npdata = npdata / row_sums[:, np.newaxis] + + return npdata + + +def run_kmeans(x, nmb_clusters, verbose=False): + """Runs kmeans on 1 GPU. + + Args: + x: data + nmb_clusters (int): number of clusters + Returns: + list: ids of data in each cluster + """ + n_data, d = x.shape + + # faiss implementation of k-means + clus = faiss.Clustering(d, nmb_clusters) + + # Change faiss seed at each k-means so that the randomly picked + # initialization centroids do not correspond to the same feature ids + # from an epoch to another. + clus.seed = np.random.randint(1234) + + clus.niter = 20 + clus.max_points_per_centroid = 10000000 + res = faiss.StandardGpuResources() + flat_config = faiss.GpuIndexFlatConfig() + flat_config.useFloat16 = False + flat_config.device = 0 + index = faiss.GpuIndexFlatL2(res, d, flat_config) + + # perform the training + clus.train(x, index) + _, I = index.search(x, 1) + return I.reshape(-1, ) + + +class KMeans(object): + def __init__(self, k, pca_dim): + self.k = k + self.pca_dim = pca_dim + + def cluster(self, data, verbose=True): + """Performs k-means clustering. 
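Tying the two helpers above together: `KMeans.cluster`, whose definition begins here, PCA-whitens and L2-normalizes the features, then runs GPU k-means. The pipeline can be exercised directly (again assuming a GPU faiss build):

```python
import numpy as np

feats = np.random.rand(2048, 512).astype('float32')  # toy feature matrix
xb = preprocess_features(feats, pca=256)             # PCA-whiten + L2-normalize
assignments = run_kmeans(xb, nmb_clusters=10)        # one cluster id per row
print(assignments.shape)                             # (2048,)
```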
+ Args: + x_data (np.array N * dim): data to cluster + """ + # PCA-reducing, whitening and L2-normalization + xb = preprocess_features(data, pca=self.pca_dim) + + if np.isnan(xb).any(): + row_sums = np.linalg.norm(data, axis=1) + data_norm = data / row_sums[:, np.newaxis] + if np.isnan(data_norm).any(): + I = run_kmeans(data_norm, self.k, verbose) + else: + I = run_kmeans(data, self.k, verbose) + else: + # cluster the data + I = run_kmeans(xb, self.k, verbose) + return I + + +def sort_array(old_array, index_array): + sorted_array = np.ones_like(old_array) + sorted_array[index_array] = old_array + return sorted_array diff --git a/OpenOOD/openood/trainers/utils.py b/OpenOOD/openood/trainers/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2aed19fac05b1423c130e75c0a3241461b1eccd6 --- /dev/null +++ b/OpenOOD/openood/trainers/utils.py @@ -0,0 +1,84 @@ +from torch.utils.data import DataLoader + +from openood.utils import Config + +from .arpl_gan_trainer import ARPLGANTrainer +from .arpl_trainer import ARPLTrainer +from .augmix_trainer import AugMixTrainer +from .base_trainer import BaseTrainer +from .cider_trainer import CIDERTrainer +from .conf_branch_trainer import ConfBranchTrainer +from .csi_trainer import CSITrainer +from .cutmix_trainer import CutMixTrainer +from .cutpaste_trainer import CutPasteTrainer +from .draem_trainer import DRAEMTrainer +from .dropout_trainer import DropoutTrainer +from .dsvdd_trainer import AETrainer, DSVDDTrainer +from .godin_trainer import GodinTrainer +from .kdad_trainer import KdadTrainer +from .logitnorm_trainer import LogitNormTrainer +from .mcd_trainer import MCDTrainer +from .mixup_trainer import MixupTrainer +from .mos_trainer import MOSTrainer +from .npos_trainer import NPOSTrainer +from .oe_trainer import OETrainer +from .opengan_trainer import OpenGanTrainer +from .rd4ad_trainer import Rd4adTrainer +from .sae_trainer import SAETrainer +from .udg_trainer import UDGTrainer +from .vos_trainer import VOSTrainer +from .rts_trainer import RTSTrainer +from .rotpred_trainer import RotPredTrainer +from .regmixup_trainer import RegMixupTrainer +from .mixoe_trainer import MixOETrainer +from .ish_trainer import ISHTrainer + + +def get_trainer(net, train_loader: DataLoader, val_loader: DataLoader, + config: Config): + if type(train_loader) is DataLoader: + trainers = { + 'base': BaseTrainer, + 'augmix': AugMixTrainer, + 'mixup': MixupTrainer, + 'regmixup': RegMixupTrainer, + 'sae': SAETrainer, + 'draem': DRAEMTrainer, + 'kdad': KdadTrainer, + 'conf_branch': ConfBranchTrainer, + 'dcae': AETrainer, + 'dsvdd': DSVDDTrainer, + 'npos': NPOSTrainer, + 'opengan': OpenGanTrainer, + 'kdad': KdadTrainer, + 'godin': GodinTrainer, + 'arpl': ARPLTrainer, + 'arpl_gan': ARPLGANTrainer, + 'mos': MOSTrainer, + 'vos': VOSTrainer, + 'cider': CIDERTrainer, + 'cutpaste': CutPasteTrainer, + 'cutmix': CutMixTrainer, + 'dropout': DropoutTrainer, + 'csi': CSITrainer, + 'logitnorm': LogitNormTrainer, + 'rd4ad': Rd4adTrainer, + 'rts': RTSTrainer, + 'rotpred': RotPredTrainer, + 'ish': ISHTrainer, + } + if config.trainer.name in ['cider', 'npos']: + return trainers[config.trainer.name](net, train_loader, val_loader, + config) + else: + return trainers[config.trainer.name](net, train_loader, config) + + else: + trainers = { + 'oe': OETrainer, + 'mcd': MCDTrainer, + 'udg': UDGTrainer, + 'mixoe': MixOETrainer + } + return trainers[config.trainer.name](net, train_loader[0], + train_loader[1], config) diff --git a/OpenOOD/openood/trainers/vos_trainer.py 
b/OpenOOD/openood/trainers/vos_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..eeadbc479e4982acd93f151704b804684ec66da8 --- /dev/null +++ b/OpenOOD/openood/trainers/vos_trainer.py @@ -0,0 +1,193 @@ +import numpy as np +import torch +import torch.nn.functional as F +from torch.distributions.multivariate_normal import MultivariateNormal +from tqdm import tqdm + +import openood.utils.comm as comm +from openood.utils import Config + + +def cosine_annealing(step, total_steps, lr_max, lr_min): + return lr_min + (lr_max - + lr_min) * 0.5 * (1 + np.cos(step / total_steps * np.pi)) + + +class VOSTrainer: + def __init__(self, net, train_loader, config: Config): + self.train_loader = train_loader + self.config = config + self.net = net + weight_energy = torch.nn.Linear(config.num_classes, 1).cuda() + torch.nn.init.uniform_(weight_energy.weight) + self.logistic_regression = torch.nn.Linear(1, 2).cuda() + self.optimizer = torch.optim.SGD( + list(net.parameters()) + list(weight_energy.parameters()) + + list(self.logistic_regression.parameters()), + config.optimizer['lr'], + momentum=config.optimizer['momentum'], + weight_decay=config.optimizer['weight_decay'], + nesterov=True) + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lr_lambda=lambda step: cosine_annealing( + step, config.optimizer['num_epochs'] * len(train_loader), 1, + 1e-6 / config.optimizer['lr'])) + self.number_dict = {} + for i in range(self.config['num_classes']): + self.number_dict[i] = 0 + self.data_dict = torch.zeros(self.config['num_classes'], + self.config['sample_number'], + self.config['feature_dim']).cuda() + + def train_epoch(self, epoch_idx): + self.net.train() + loss_avg = 0.0 + sample_number = self.config['sample_number'] + num_classes = self.config['num_classes'] + train_dataiter = iter(self.train_loader) + eye_matrix = torch.eye(self.config['feature_dim'], device='cuda') + + for train_step in tqdm(range(1, + len(train_dataiter) + 1), + desc='Epoch {:03d}'.format(epoch_idx), + position=0, + leave=True, + disable=not comm.is_main_process()): + batch = next(train_dataiter) + images = batch['data'].cuda() + labels = batch['label'].cuda() + + x, output = self.net.forward(images, return_feature=True) + + sum_temp = 0 + for index in range(num_classes): + sum_temp += self.number_dict[index] + lr_reg_loss = torch.zeros(1).cuda()[0] + if (sum_temp == num_classes * sample_number + and epoch_idx < self.config['start_epoch']): + target_numpy = labels.cpu().data.numpy() + for index in range(len(labels)): + dict_key = target_numpy[index] + self.data_dict[dict_key] = torch.cat( + (self.data_dict[dict_key][1:], + output[index].detach().view(1, -1)), 0) + elif (sum_temp == num_classes * sample_number + and epoch_idx >= self.config['start_epoch']): + target_numpy = labels.cpu().data.numpy() + for index in range(len(labels)): + dict_key = target_numpy[index] + self.data_dict[dict_key] = torch.cat( + (self.data_dict[dict_key][1:], + output[index].detach().view(1, -1)), 0) + for index in range(num_classes): + if index == 0: + X = self.data_dict[index] - self.data_dict[index].mean( + 0) + mean_embed_id = self.data_dict[index].mean(0).view( + 1, -1) + else: + X = torch.cat((X, self.data_dict[index] - + self.data_dict[index].mean(0)), 0) + mean_embed_id = torch.cat( + (mean_embed_id, self.data_dict[index].mean(0).view( + 1, -1)), 0) + + temp_precision = torch.mm(X.t(), X) / len(X) + temp_precision += 0.0001 * eye_matrix + for index in range(num_classes): + new_dis = MultivariateNormal( + 
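The estimation step unfolding here fits one Gaussian per class, using per-class means and a covariance shared across classes (plus a small diagonal jitter), then keeps the lowest-likelihood draws as virtual outliers. The same recipe in isolation:

```python
import torch
from torch.distributions.multivariate_normal import MultivariateNormal

feats = torch.randn(1000, 16)                       # toy features of one class
mean = feats.mean(0)
X = feats - mean
cov = X.t() @ X / len(X) + 0.0001 * torch.eye(16)   # shared covariance + jitter
dist = MultivariateNormal(loc=mean, covariance_matrix=cov)
samples = dist.rsample((10000, ))
_, idx = torch.topk(-dist.log_prob(samples), 100)   # 100 least-likely draws
virtual_outliers = samples[idx]
```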
loc=mean_embed_id[index], + covariance_matrix=temp_precision) + negative_samples = new_dis.rsample( + (self.config['sample_from'], )) + prob_density = new_dis.log_prob(negative_samples) + cur_samples, index_prob = torch.topk( + -prob_density, self.config['select']) + if index == 0: + ood_samples = negative_samples[index_prob] + else: + ood_samples = torch.cat( + (ood_samples, negative_samples[index_prob]), 0) + if len(ood_samples) != 0: + + energy_score_for_fg = log_sum_exp(x, + num_classes=num_classes, + dim=1) + try: + predictions_ood = self.net.fc(ood_samples) + except AttributeError: + predictions_ood = self.net.module.fc(ood_samples) + + energy_score_for_bg = log_sum_exp(predictions_ood, + num_classes=num_classes, + dim=1) + + input_for_lr = torch.cat( + (energy_score_for_fg, energy_score_for_bg), -1) + labels_for_lr = torch.cat( + (torch.ones(len(output)).cuda(), + torch.zeros(len(ood_samples)).cuda()), -1) + + output1 = self.logistic_regression(input_for_lr.view( + -1, 1)) + + lr_reg_loss = F.cross_entropy(output1, + labels_for_lr.long()) + else: + target_numpy = labels.cpu().data.numpy() + for index in range(len(labels)): + dict_key = target_numpy[index] + + if self.number_dict[dict_key] < sample_number: + self.data_dict[dict_key][self.number_dict[ + dict_key]] = output[index].detach() + self.number_dict[dict_key] += 1 + self.optimizer.zero_grad() + loss = F.cross_entropy(x, labels) + loss += self.config.trainer['loss_weight'] * lr_reg_loss + loss.backward() + + self.optimizer.step() + self.scheduler.step() + + loss_avg = loss_avg * 0.8 + float(loss) * 0.2 + + metrics = {} + metrics['loss'] = loss_avg + metrics['epoch_idx'] = epoch_idx + return self.net, metrics + + +def log_sum_exp(value, num_classes=10, dim=None, keepdim=False): + """Numerically stable implementation of the operation + value.exp().sum(dim, keepdim).log().""" + + # TODO: torch.max(value, dim=None) threw an error at time of writing + weight_energy = torch.nn.Linear(num_classes, 1).cuda() + if dim is not None: + m, _ = torch.max(value, dim=dim, keepdim=True) + value0 = value - m + if keepdim is False: + m = m.squeeze(dim) + + output = m + torch.log( + torch.sum(F.relu(weight_energy.weight) * torch.exp(value0), + dim=dim, + keepdim=keepdim)) + # set lower bound + out_list = output.cpu().detach().numpy().tolist() + for i in range(len(out_list)): + if out_list[i] < -1: + out_list[i] = -1 + else: + continue + output = torch.Tensor(out_list).cuda() + return output + else: + m = torch.max(value) + sum_exp = torch.sum(torch.exp(value - m)) + # if isinstance(sum_exp, Number): + # return m + math.log(sum_exp) + # else: + return m + torch.log(sum_exp) diff --git a/OpenOOD/openood/utils/__init__.py b/OpenOOD/openood/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8db23d62b88f09b5ab19b7b7dd2a35ab80a56c2f --- /dev/null +++ b/OpenOOD/openood/utils/__init__.py @@ -0,0 +1,3 @@ +from .config import Config, setup_config +from .launch import launch +from .logger import setup_logger
diff --git a/OpenOOD/openood/utils/comm.py b/OpenOOD/openood/utils/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..78b3276669a792f0693c336a24a5931a8abae98e --- /dev/null +++ b/OpenOOD/openood/utils/comm.py @@ -0,0 +1,199 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +"""This file contains primitives for multi-gpu communication. + +This is useful when doing distributed training. +""" + +import functools + +import numpy as np +import torch +import torch.distributed as dist + +_LOCAL_PROCESS_GROUP = None +""" +A torch process group which only includes processes +that on the same machine as the current process. +This variable is set when processes are spawned +by `launch()` in "engine/launch.py".
+""" + + +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank() -> int: + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def get_local_rank() -> int: + """ + Returns: + The rank of the current process + within the local (per-machine) process group. + """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + assert ( + _LOCAL_PROCESS_GROUP is not None + ), 'Local process group is not created! '\ + 'Please use launch() to spawn processes!' + return dist.get_rank(group=_LOCAL_PROCESS_GROUP) + + +def get_local_size() -> int: + """ + Returns: + The size of the per-machine process group, + i.e. the number of processes per machine. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) + + +def is_main_process() -> bool: + return get_rank() == 0 + + +def synchronize(): + """Helper function to synchronize (barrier) among all processes when using + distributed training.""" + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + if dist.get_backend() == dist.Backend.NCCL: + # This argument is needed to avoid warnings. + # It's valid only for NCCL backend. + dist.barrier(device_ids=[torch.cuda.current_device()]) + else: + dist.barrier() + + +@functools.lru_cache() +def _get_global_gloo_group(): + """Return a process group based on gloo backend, containing all the ranks + The result is cached.""" + if dist.get_backend() == 'nccl': + return dist.new_group(backend='gloo') + else: + return dist.group.WORLD + + +def all_gather(data, group=None): + """Run all_gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group( + ) # use CPU group by default, to reduce GPU RAM usage. + world_size = dist.get_world_size(group) + if world_size == 1: + return [data] + + output = [None for _ in range(world_size)] + dist.all_gather_object(output, data, group=group) + return output + + +def gather(data, dst=0, group=None): + """Run gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + dst (int): destination rank + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: on dst, a list of data gathered from each rank. Otherwise, + an empty list. + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + world_size = dist.get_world_size(group=group) + if world_size == 1: + return [data] + rank = dist.get_rank(group=group) + + if rank == dst: + output = [None for _ in range(world_size)] + dist.gather_object(data, output, dst=dst, group=group) + return output + else: + dist.gather_object(data, None, dst=dst, group=group) + return [] + + +def shared_random_seed(): + """ + Returns: + int: a random number that is the same across all workers. + If workers need a shared RNG, they can use this shared seed to + create one. 
+
+    All workers must call this function, otherwise it will deadlock.
+    """
+    ints = np.random.randint(2**31)
+    all_ints = all_gather(ints)
+    return all_ints[0]
+
+
+def reduce_dict(input_dict, average=True):
+    """Reduce the values in the dictionary from all processes so that the
+    process with rank 0 has the reduced results.
+
+    Args:
+        input_dict (dict): inputs to be reduced.
+            All the values must be scalar CUDA Tensors.
+        average (bool): whether to average or sum the values
+
+    Returns:
+        a dict with the same keys as input_dict, after reduction.
+    """
+    world_size = get_world_size()
+    if world_size < 2:
+        return input_dict
+    with torch.no_grad():
+        names = []
+        values = []
+        # sort the keys so that they are consistent across processes
+        for k in sorted(input_dict.keys()):
+            names.append(k)
+            values.append(input_dict[k])
+        values = torch.stack(values, dim=0)
+        dist.reduce(values, dst=0)
+        if dist.get_rank() == 0 and average:
+            # only the main process holds the accumulated sum, so only divide
+            # by world_size in this case
+            values /= world_size
+        reduced_dict = {k: v for k, v in zip(names, values)}
+    return reduced_dict
diff --git a/OpenOOD/openood/utils/config.py b/OpenOOD/openood/utils/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c7d42ac213f82270b8b222fc5171b6492f7f1f0
--- /dev/null
+++ b/OpenOOD/openood/utils/config.py
@@ -0,0 +1,375 @@
+import argparse
+import os
+import re
+
+import yaml
+
+
+def setup_config(config_process_order=('merge', 'parse_args', 'parse_refs')):
+    """Parse configuration files and command line arguments.
+
+    This method reads the command line to
+    1. extract and stack YAML config files,
+    2. collect modifications from command line arguments,
+    so that the finalized configuration is generated.
+
+    Note:
+        The default arguments allow the following equivalent code:
+            config = merge_configs(*config)
+            --> merge multiple YAML config files
+            config.parse_args(unknown_args)
+            --> use command line arguments to overwrite default settings
+            config.parse_refs()
+            --> replace '@{xxx.yyy}'-like values with referenced values
+        It is recommended to merge before parse_args so that later configs
+        can re-use references defined in earlier configs.
+        For example, if
+            config1.key1 = jkyang
+            config1.key2 = '@{key1}'
+            config2.key1 = yzang
+            config3 = merge_configs(config1, config2)
+            config3.parse_refs()
+        then
+            config3.key2 will be yzang rather than jkyang
+
+    Return:
+        An object of ``Config``. It can be understood as a dictionary.
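+
+    Example (editor's sketch; the config file names and the ``dataset.name``
+    override are hypothetical)::
+
+        # python main.py --config base.yml exp.yml \
+        #     --dataset.name cifar10 --proj_ROOT /path/to/OpenOOD
+        config = setup_config()
+        print(config.dataset.name, config['dataset.name'])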
+ """ + + parser = argparse.ArgumentParser() + parser.add_argument('--config', dest='config', nargs='+', required=True) + opt, unknown_args = parser.parse_known_args() + config = [Config(path) for path in opt.config] + + + for process in config_process_order: + if process == 'merge': + config = merge_configs(*config) + elif process == 'parse_args': + if isinstance(config, Config): + config.parse_args(unknown_args, strict=False) + else: + for cfg in config: + cfg.parse_args(unknown_args) + elif process == 'parse_refs': + if isinstance(config, Config): + config.parse_refs() + else: + for cfg in config: + cfg.parse_refs() + else: + raise ValueError('unknown config process name: {}'.format(process)) + + # manually modify 'output_dir' + unknown_args = list_to_args(unknown_args) + + # import pdb + # pdb.set_trace() + + config.output_dir = os.path.join(unknown_args["proj_ROOT"], "results", config.exp_name) + + return config + + +def list_to_args(input_list): + args = {} + for i in range(0, len(input_list), 2): + if i + 1 < len(input_list): + args[input_list[i].lstrip('-')] = input_list[i + 1] + return args + +def parse_config(config): + config_process_order = ('merge', 'parse_refs') + for process in config_process_order: + if process == 'merge': + config = merge_configs(*config) + elif process == 'parse_refs': + if isinstance(config, Config): + config.parse_refs() + else: + for cfg in config: + cfg.parse_refs() + else: + raise ValueError('unknown config process name: {}'.format(process)) + # manually modify 'output_dir' + config.output_dir = os.path.join(config.output_dir, config.exp_name) + + return config + + +class Config(dict): + def __init__(self, *args, **kwargs): + super(Config, self).__init__() + for arg in args: + if arg == ' ': + continue # hard code remove white space in config file list + if isinstance(arg, str): + if arg.endswith('.yml'): + with open(arg, 'r') as f: + raw_dict = yaml.safe_load(f) + else: + raise Exception('unknown file format %s' % arg) + init_assign(self, raw_dict) + elif isinstance(arg, dict): + init_assign(self, arg) + else: + raise TypeError('arg should be an instance of or ') + if kwargs: + init_assign(self, kwargs) + + def __call__(self, *args, **kwargs): + return Config(self, *args, **kwargs) + + def __repr__(self, indent=4, prefix=''): + r = [] + for key, value in sorted(self.items()): + if isinstance(value, Config): + r.append('{}{}:'.format(prefix, key)) + r.append(value.__repr__(indent, prefix + ' ' * indent)) + else: + r.append('{}{}: {}'.format(prefix, key, value)) + return '\n'.join(r) + + def __setstate__(self, state): + init_assign(self, state) + + def __getstate__(self): + d = dict() + for key, value in self.items(): + if type(value) is Config: + value = value.__getstate__() + d[key] = value + return d + + # access by '.' 
-> access by '[]'
+    def __getattr__(self, key):
+        try:
+            return self[key]
+        except KeyError:
+            return None
+
+    def __setattr__(self, key, value):
+        self[key] = value
+
+    def __delattr__(self, key):
+        del self[key]
+
+    # access by '[]'
+    def __getitem__(self, key):
+        sub_cfg, sub_key = consume_dots(self, key, create_default=False)
+        return dict.__getitem__(sub_cfg, sub_key)
+
+    def __setitem__(self, key, value):
+        sub_cfg, sub_key = consume_dots(self, key, create_default=True)
+        if sub_cfg.__contains__(sub_key) and value == '_DELETE_CONFIG_':
+            dict.__delitem__(sub_cfg, sub_key)
+        else:
+            dict.__setitem__(sub_cfg, sub_key, value)
+
+    def __delitem__(self, key):
+        sub_cfg, sub_key = consume_dots(self, key, create_default=False)
+        dict.__delitem__(sub_cfg, sub_key)
+
+    # access by 'in'
+    def __contains__(self, key):
+        try:
+            sub_cfg, sub_key = consume_dots(self, key, create_default=False)
+        except KeyError:
+            return False
+        return dict.__contains__(sub_cfg, sub_key)
+
+    # traverse keys / values / items
+    def all_keys(self, only_leaf=True):
+        for key in traverse_dfs(self,
+                                'key',
+                                continue_type=Config,
+                                only_leaf=only_leaf):
+            yield key
+
+    def all_values(self, only_leaf=True):
+        for value in traverse_dfs(self,
+                                  'value',
+                                  continue_type=Config,
+                                  only_leaf=only_leaf):
+            yield value
+
+    def all_items(self, only_leaf=True):
+        for key, value in traverse_dfs(self,
+                                       'item',
+                                       continue_type=Config,
+                                       only_leaf=only_leaf):
+            yield key, value
+
+    # for command line arguments
+    def parse_args(self, cmd_args=None, strict=True):
+        unknown_args = []
+        if cmd_args is None:
+            import sys
+            cmd_args = sys.argv[1:]
+        index = 0
+        while index < len(cmd_args):
+            arg = cmd_args[index]
+            err_msg = 'invalid command line argument pattern: %s' % arg
+            assert arg.startswith('--'), err_msg
+            assert len(arg) > 2, err_msg
+            assert arg[2] != '-', err_msg
+
+            arg = arg[2:]
+            if '=' in arg:
+                key, full_value_str = arg.split('=')
+                index += 1
+            else:
+                assert len(cmd_args) > index + 1, \
+                    'incomplete command line arguments'
+                key = arg
+                full_value_str = cmd_args[index + 1]
+                index += 2
+            if ':' in full_value_str:
+                value_str, value_type_str = full_value_str.split(':')
+                value_type = eval(value_type_str)
+            else:
+                value_str = full_value_str
+                value_type = None
+
+            if key not in self:
+                if strict:
+                    raise KeyError(key)
+                else:
+                    unknown_args.extend(['--' + key, full_value_str])
+                    continue
+
+            if value_type is None:
+                value_type = type(self[key])
+
+            if value_type is bool:
+                self[key] = {
+                    'true': True,
+                    'True': True,
+                    '1': True,
+                    'false': False,
+                    'False': False,
+                    '0': False,
+                }[value_str]
+            else:
+                self[key] = value_type(value_str)
+
+        return unknown_args
+
+    # for key reference
+    def parse_refs(self, subconf=None, stack_depth=1, max_stack_depth=10):
+        if stack_depth > max_stack_depth:
+            raise Exception(
+                ('Recursively calling `parse_refs` too many times '
+                 'with stack depth > {}. 
'
+                 'A circular reference may exist in your config.\n'
+                 'If a deeper calling stack is really needed, '
+                 'please call `parse_refs` with an extra argument like: '
+                 '`parse_refs(max_stack_depth=9999)`').format(max_stack_depth))
+        if subconf is None:
+            subconf = self
+        for key in subconf.keys():
+            value = subconf[key]
+            if type(value) is str and '@' in value:
+                if value.count('@') == 1 and value.startswith(
+                        '@{') and value.endswith('}'):
+                    # pure reference
+                    ref_key = value[2:-1]
+                    ref_value = self[ref_key]
+                    subconf[key] = ref_value
+                else:
+                    # compositional references
+                    ref_key_list = re.findall("'@{(.+?)}'", value)
+                    ref_key_list = list(set(ref_key_list))
+                    ref_value_list = [
+                        self[ref_key] for ref_key in ref_key_list
+                    ]
+                    origin_ref_key_list = [
+                        "'@{" + ref_key + "}'" for ref_key in ref_key_list
+                    ]
+                    for origin_ref_key, ref_value in zip(
+                            origin_ref_key_list, ref_value_list):
+                        value = value.replace(origin_ref_key, str(ref_value))
+                    subconf[key] = value
+        for key in subconf.keys():
+            value = subconf[key]
+            if type(value) is Config:
+                self.parse_refs(value, stack_depth + 1)
+
+
+def merge_configs(*configs):
+    final_config = Config()
+    for i in range(len(configs)):
+        config = configs[i]
+        if not isinstance(config, Config):
+            raise TypeError(
+                'config.merge_configs expects `Config` type inputs, '
+                'but got `{}`.\n'
+                'Correct usage: merge_configs(config1, config2, ...)\n'
+                'Incorrect usage: merge_configs([config1, config2, ...])'.
+                format(type(config)))
+        final_config = final_config(dict(config.all_items()))
+    return final_config
+
+
+def consume_dots(config, key, create_default):
+    sub_keys = key.split('.', 1)
+    sub_key = sub_keys[0]
+
+    if sub_key in Config.__dict__:
+        raise KeyError(
+            '"{}" is a reserved API name, '
+            'which should not be used as a normal dictionary key'.format(
+                sub_key))
+
+    if not dict.__contains__(config, sub_key) and len(sub_keys) == 2:
+        if create_default:
+            dict.__setitem__(config, sub_key, Config())
+        else:
+            raise KeyError(key)
+
+    if len(sub_keys) == 1:
+        return config, sub_key
+    else:
+        sub_config = dict.__getitem__(config, sub_key)
+        if type(sub_config) != Config:
+            if create_default:
+                sub_config = Config()
+                dict.__setitem__(config, sub_key, sub_config)
+            else:
+                raise KeyError(key)
+        return consume_dots(sub_config, sub_keys[1], create_default)
+
+
+def traverse_dfs(root, mode, continue_type, only_leaf, key_prefix=''):
+    for key, value in root.items():
+        full_key = '.'.join([key_prefix, key]).strip('.')
+        child_kvs = []
+        if type(value) == continue_type:
+            for kv in traverse_dfs(value, mode, continue_type, only_leaf,
+                                   full_key):
+                child_kvs.append(kv)
+        # equivalent:
+        # if not (len(child_kvs) > 0 and
+        #         type(value) == continue_type and
+        #         only_leaf)
+        if (len(child_kvs) == 0 or type(value) != continue_type
+                or not only_leaf):
+            yield {
+                'key': full_key,
+                'value': value,
+                'item': (full_key, value)
+            }[mode]
+        for kv in child_kvs:
+            yield kv
+
+
+def init_assign(config, d):
+    for full_key, value in traverse_dfs(d,
+                                        'item',
+                                        continue_type=dict,
+                                        only_leaf=True):
+        sub_cfg, sub_key = consume_dots(config, full_key, create_default=True)
+        sub_cfg[sub_key] = value
diff --git a/OpenOOD/openood/utils/launch.py b/OpenOOD/openood/utils/launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8c4680bee71ccbcb8272d99918d6bd009d13c24
--- /dev/null
+++ b/OpenOOD/openood/utils/launch.py
@@ -0,0 +1,132 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
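+#
+# Editor's note, a minimal usage sketch (the `main` entry point below is
+# hypothetical, not part of this file). `launch` calls `main_func(*args)`
+# directly when world_size == 1 and otherwise spawns one worker per GPU:
+#
+#     def main(config):
+#         ...  # training / evaluation code
+#
+#     launch(main, num_gpus_per_machine=2, num_machines=1, machine_rank=0,
+#            dist_url='auto', args=(config,))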
+import logging
+from datetime import timedelta
+
+import torch
+import torch.distributed as dist
+import torch.multiprocessing as mp
+
+from openood.utils import comm
+
+__all__ = ['DEFAULT_TIMEOUT', 'launch']
+
+DEFAULT_TIMEOUT = timedelta(minutes=30)
+
+
+def _find_free_port():
+    import socket
+
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    # Binding to port 0 will cause the OS to find an available port for us
+    sock.bind(('', 0))
+    port = sock.getsockname()[1]
+    sock.close()
+    # NOTE: there is still a chance the port could be taken by other processes.
+    return port
+
+
+def launch(
+    main_func,
+    num_gpus_per_machine,
+    num_machines=1,
+    machine_rank=0,
+    dist_url=None,
+    args=(),
+    timeout=DEFAULT_TIMEOUT,
+):
+    """Launch multi-GPU or distributed training. This function must be called
+    on all machines involved in the training. It will spawn child processes
+    (defined by ``num_gpus_per_machine``) on each machine.
+
+    Args:
+        main_func: a function that will be called by `main_func(*args)`
+        num_gpus_per_machine (int): number of GPUs per machine
+        num_machines (int): the total number of machines
+        machine_rank (int): the rank of this machine
+        dist_url (str): url to connect to for distributed jobs,
+            including protocol e.g. "tcp://127.0.0.1:8686".
+            Can be set to "auto" to automatically select a free port on localhost
+        timeout (timedelta): timeout of the distributed workers
+        args (tuple): arguments passed to main_func
+    """
+    world_size = num_machines * num_gpus_per_machine
+    if world_size > 1:
+        # https://github.com/pytorch/pytorch/pull/14391
+        # TODO prctl in spawned processes
+
+        if dist_url == 'auto':
+            assert num_machines == 1, \
+                'dist_url=auto not supported in multi-machine jobs.'
+            port = _find_free_port()
+            dist_url = f'tcp://127.0.0.1:{port}'
+        if num_machines > 1 and dist_url.startswith('file://'):
+            logger = logging.getLogger(__name__)
+            logger.warning(
+                'file:// is not a reliable init_method in multi-machine jobs. '
+                'Prefer tcp://')
+
+        mp.spawn(
+            _distributed_worker,
+            nprocs=num_gpus_per_machine,
+            args=(
+                main_func,
+                world_size,
+                num_gpus_per_machine,
+                machine_rank,
+                dist_url,
+                args,
+                timeout,
+            ),
+            daemon=False,
+        )
+    else:
+        main_func(*args)
+
+
+def _distributed_worker(
+    local_rank,
+    main_func,
+    world_size,
+    num_gpus_per_machine,
+    machine_rank,
+    dist_url,
+    args,
+    timeout=DEFAULT_TIMEOUT,
+):
+    assert torch.cuda.is_available(), \
+        'cuda is not available. Please check your installation.'
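+    # rank layout (editor's note): e.g. num_gpus_per_machine=4,
+    # machine_rank=1, local_rank=3  ->  global_rank = 1 * 4 + 3 = 7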
+    global_rank = machine_rank * num_gpus_per_machine + local_rank
+    try:
+        dist.init_process_group(
+            backend='NCCL',
+            init_method=dist_url,
+            world_size=world_size,
+            rank=global_rank,
+            timeout=timeout,
+        )
+    except Exception as e:
+        logger = logging.getLogger(__name__)
+        logger.error('Process group URL: {}'.format(dist_url))
+        raise e
+
+    # Setup the local process group
+    # (which contains ranks within the same machine)
+    assert comm._LOCAL_PROCESS_GROUP is None
+    num_machines = world_size // num_gpus_per_machine
+    for i in range(num_machines):
+        ranks_on_i = list(
+            range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
+        pg = dist.new_group(ranks_on_i)
+        if i == machine_rank:
+            comm._LOCAL_PROCESS_GROUP = pg
+
+    assert num_gpus_per_machine <= torch.cuda.device_count()
+    torch.cuda.set_device(local_rank)
+
+    # synchronize is needed here to prevent a possible timeout
+    # after calling init_process_group
+    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
+    comm.synchronize()
+
+    main_func(*args)
diff --git a/OpenOOD/openood/utils/logger.py b/OpenOOD/openood/utils/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1320c0ae743859143ac9d74cff498886a92f3dc
--- /dev/null
+++ b/OpenOOD/openood/utils/logger.py
@@ -0,0 +1,125 @@
+import errno
+import os
+import os.path as osp
+import sys
+
+import yaml
+
+import openood.utils.comm as comm
+
+
+def mkdir_if_missing(dirname):
+    """Create dirname if it is missing."""
+    if not osp.exists(dirname):
+        try:
+            os.makedirs(dirname)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+
+class Logger:
+    """Write console output to an external text file.
+
+    Imported from
+    ``
+
+    Args:
+        fpath (str): path of the file to save logging output to.
+
+    Examples:
+        >>> import sys
+        >>> import os.path as osp
+        >>> save_dir = 'output/experiment-1'
+        >>> log_name = 'train.log'
+        >>> sys.stdout = Logger(osp.join(save_dir, log_name))
+    """
+    def __init__(self, fpath=None):
+        self.console = sys.stdout
+        self.file = None
+        if fpath is not None:
+            mkdir_if_missing(osp.dirname(fpath))
+            self.file = open(fpath, 'w')
+
+    def __del__(self):
+        self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def write(self, msg):
+        self.console.write(msg)
+        if self.file is not None:
+            self.file.write(msg)
+
+    def flush(self):
+        self.console.flush()
+        if self.file is not None:
+            self.file.flush()
+            os.fsync(self.file.fileno())
+
+    def close(self):
+        self.console.close()
+        if self.file is not None:
+            self.file.close()
+
+
+def setup_logger(config):
+    """Generate the exp directory to save configs, logger, checkpoints, etc.
+
+    Args:
+        config: all configs of the experiment
+    """
+    print('------------------ Config --------------------------', flush=True)
+    print(config, flush=True)
+    print(u'\u2500' * 70, flush=True)
+
+    output = config.output_dir
+
+    if config.save_output and comm.is_main_process():
+        print('Output dir: {}'.format(output), flush=True)
+        if osp.isdir(output):
+            if config.merge_option == 'default':
+                ans = input('Exp dir already exists, merge it? 
(y/n)')
+                if ans in ['yes', 'Yes', 'YES', 'y', 'Y']:
+                    save_logger(config, output)
+                elif ans in ['no', 'No', 'NO', 'n', 'N']:
+                    print('Quitting the process...', flush=True)
+                    quit()
+                else:
+                    raise ValueError('Unexpected input.')
+            elif config.merge_option == 'merge':
+                save_logger(config, output)
+            elif config.merge_option == 'pass':
+                if os.path.exists(os.path.join(output, 'ood.csv')):
+                    print('Exp dir already exists, quitting the process...',
+                          flush=True)
+                    quit()
+                else:
+                    save_logger(config, output)
+        else:
+            save_logger(config, output)
+    else:
+        print('No output directory.', flush=True)
+
+    comm.synchronize()
+
+
+def save_logger(config, output):
+    print('Output directory path: {}'.format(output), flush=True)
+    os.makedirs(output, exist_ok=True)
+    # Save config
+    # FIXME: saved config file is not beautified.
+    config_save_path = osp.join(output, 'config.yml')
+    with open(config_save_path, 'w') as f:
+        yaml.dump(config,
+                  f,
+                  default_flow_style=False,
+                  sort_keys=False,
+                  indent=2)
+    # save log file
+    fpath = osp.join(output, 'log.txt')
+    sys.stdout = Logger(fpath)
diff --git a/OpenOOD/output.txt b/OpenOOD/output.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6d4f8bb7a310eebec8d347a9038010ab6f7c3147
--- /dev/null
+++ b/OpenOOD/output.txt
@@ -0,0 +1,52 @@
+==> Building model..
+Postprocessor_name is ignored because postprocessor is passed
+Performing inference on bronze2 test set...
+Processing near ood...
+Performing inference on imagenet22k_container dataset...
+Computing metrics on imagenet22k_container dataset...
+FPR@95: 48.49, AUROC: 82.61 AUPR_IN: 43.38, AUPR_OUT: 98.88
+──────────────────────────────────────────────────────────────────────
+
+Performing inference on ssb_hard dataset...
+Computing metrics on ssb_hard dataset...
+FPR@95: 48.10, AUROC: 80.16 AUPR_IN: 49.74, AUPR_OUT: 97.79
+──────────────────────────────────────────────────────────────────────
+
+Performing inference on ninco dataset...
+Computing metrics on ninco dataset...
+FPR@95: 57.75, AUROC: 76.60 AUPR_IN: 71.68, AUPR_OUT: 82.72
+──────────────────────────────────────────────────────────────────────
+
+Computing mean metrics...
+FPR@95: 51.45, AUROC: 79.79 AUPR_IN: 54.93, AUPR_OUT: 93.13
+──────────────────────────────────────────────────────────────────────
+
+Processing far ood...
+Performing inference on inaturalist dataset...
+Computing metrics on inaturalist dataset...
+FPR@95: 50.19, AUROC: 76.31 AUPR_IN: 67.05, AUPR_OUT: 86.42
+──────────────────────────────────────────────────────────────────────
+
+Performing inference on textures dataset...
+Computing metrics on textures dataset...
+FPR@95: 62.99, AUROC: 76.62 AUPR_IN: 71.68, AUPR_OUT: 81.33
+──────────────────────────────────────────────────────────────────────
+
+Performing inference on openimage_o dataset...
+Computing metrics on openimage_o dataset...
+FPR@95: 53.44, AUROC: 77.72 AUPR_IN: 59.19, AUPR_OUT: 92.63
+──────────────────────────────────────────────────────────────────────
+
+Computing mean metrics...
+FPR@95: 55.54, AUROC: 76.88 AUPR_IN: 65.98, AUPR_OUT: 86.80 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 48.49 82.61 43.38 98.88 74.76 +ssb_hard 48.10 80.16 49.74 97.79 74.76 +ninco 57.75 76.60 71.68 82.72 74.76 +nearood 51.45 79.79 54.93 93.13 74.76 +inaturalist 50.19 76.31 67.05 86.42 74.76 +textures 62.99 76.62 71.68 81.33 74.76 +openimage_o 53.44 77.72 59.19 92.63 74.76 +farood 55.54 76.88 65.98 86.80 74.76 diff --git a/OpenOOD/output2.txt b/OpenOOD/output2.txt new file mode 100644 index 0000000000000000000000000000000000000000..f723545c49bc6c3f7e056b1334df22c90ceca0d6 --- /dev/null +++ b/OpenOOD/output2.txt @@ -0,0 +1,212 @@ +None +['main.py', '--config', 'configs/datasets/bronze2/bronze2.yml', 'configs/datasets/bronze2/bronze2_ood.yml', 'configs/networks/opengan.yml', 'configs/pipelines/test/test_opengan.yml', 'configs/preprocessors/base_preprocessor.yml', 'configs/postprocessors/opengan.yml', '--num_workers', '8', '--network.backbone.name', 'OursBronze2', '--network.backbone.pretrained', 'True', '--network.backbone.checkpoint', './results/bronze2_ours_resnet50_415_NotLine_train/s0/model_state_dict_epoch90.pth', '--evaluator.ood_scheme', 'ood', '--seed', '0', '--proj_ROOT', '/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD'] +------------------ Config -------------------------- +dataset: + image_size: 400 + interpolation: bilinear + name: bronze2 + normalization_type: imagenet + num_classes: 11 + num_gpus: 1 + num_machines: 1 + num_workers: 8 + pre_size: 420 + split_names: ['train', 'val', 'test'] + test: + batch_size: 128 + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + dataset_class: Bronze2ExcelDataset + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_test.xlsx + shuffle: False + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + train: + batch_size: 128 + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + dataset_class: Bronze2ExcelDataset + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_train.xlsx + shuffle: True + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + val: + batch_size: 128 + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + dataset_class: Bronze2ExcelDataset + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_val.xlsx + shuffle: False + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls +evaluator: + name: ood + ood_scheme: ood +exp_name: bronze2_opengan_test_ood_ood_opengan_default/s0/ood +machine_rank: 0 +mark: default +merge_option: default +network: + backbone: + checkpoint: ./results/bronze2_ours_resnet50_415_NotLine_train/s0/model_state_dict_epoch90.pth + image_size: 400 + name: OursBronze2 + num_classes: 11 + num_gpus: 1 + pretrained: True + checkpoint: ['/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_GNet.ckpt', '/home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_DNet.ckpt', None] + image_size: 400 + name: opengan + nc: 512 + ndf: 64 + ngf: 64 + num_classes: 11 + num_gpus: 1 + nz: 100 + pretrained: True +num_gpus: 1 +num_machines: 1 +num_workers: 8 +ood_dataset: + batch_size: 32 + dataset_class: ImglistDataset + farood: + datasets: ['inaturalist', 'textures', 'openimageo'] 
+ inaturalist: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_inaturalist.txt + openimageo: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_openimage_o.txt + textures: + data_dir: ./data/images_classic + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_textures.txt + image_size: 224 + interpolation: bilinear + midood: + datasets: ['ssb_hard', 'ninco'] + ninco: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_ninco.txt + ssb_hard: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/test_ssb_hard.txt + name: bronze2_ood + nearood: + bronzeM_containerS: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/images_largescale/transfer_dataset/container_structure_bronze_material/container_structure_bronze_material_test.txt + bronzeS_containerM: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/images_largescale/transfer_dataset/bronze_structure_container_material/bronze_structure_container_material_test.txt + bronze_Line: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/images_largescale/bronze_line/bronze2_Line_OOD_list.txt + datasets: ['imagenet21k_container', 'imagenet21k_container_refine', 'bronzeS_containerM', 'bronzeM_containerS', 'bronze_Line'] + imagenet21k_container: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet21k_container/imagenet21k_container_file-list.txt + imagenet21k_container_refine: + data_dir: ./data/images_largescale + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet21k_container_refine/imagenet21k_container_file-list-refine.txt + num_classes: 11 + num_gpus: 1 + num_machines: 1 + num_workers: 8 + pre_size: 256 + shuffle: False + split_names: ['val', 'nearood', 'midood', 'farood'] + val: + data_dir: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/images_largescale/ + imglist_pth: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/data/benchmark_imglist/imagenet/val_openimage_o.txt +output_dir: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_test_ood_ood_opengan_default/s0/ood +pipeline: + name: test_ood +postprocessor: + APS_mode: False + name: opengan +preprocessor: + name: base +recorder: + save_csv: True + save_scores: True +save_output: True +seed: 0 +────────────────────────────────────────────────────────────────────── +Output dir: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_test_ood_ood_opengan_default/s0/ood +Output directory path: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_test_ood_ood_opengan_default/s0/ood +Model Loading OursBronze2 Completed! +Model Loading opengan Completed! 
+ + +────────────────────────────────────────────────────────────────────── + +Start evaluation... + +Accuracy 77.25% +────────────────────────────────────────────────────────────────────── +Performing inference on bronze2 dataset... +────────────────────────────────────────────────────────────────────── +Processing nearood... +Performing inference on imagenet21k_container dataset... +Computing metrics on imagenet21k_container dataset... +FPR@95: 42.59, AUROC: 78.24 AUPR_IN: 45.59, AUPR_OUT: 97.96 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Performing inference on imagenet21k_container_refine dataset... +Computing metrics on imagenet21k_container_refine dataset... +FPR@95: 43.36, AUROC: 78.25 AUPR_IN: 50.35, AUPR_OUT: 96.96 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Performing inference on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 79.18, AUROC: 58.59 AUPR_IN: 13.29, AUPR_OUT: 94.57 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Performing inference on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 45.50, AUROC: 78.24 AUPR_IN: 46.28, AUPR_OUT: 97.25 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Performing inference on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 50.44, AUROC: 81.32 AUPR_IN: 96.28, AUPR_OUT: 37.85 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Computing mean metrics... +FPR@95: 52.21, AUROC: 74.93 AUPR_IN: 50.36, AUPR_OUT: 84.92 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +────────────────────────────────────────────────────────────────────── +Processing midood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 45.74, AUROC: 72.27 AUPR_IN: 49.79, AUPR_OUT: 95.52 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 48.55, AUROC: 74.51 AUPR_IN: 71.97, AUPR_OUT: 77.21 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Computing mean metrics... +FPR@95: 47.14, AUROC: 73.39 AUPR_IN: 60.88, AUPR_OUT: 86.37 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +────────────────────────────────────────────────────────────────────── +Processing farood... +Performing inference on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 39.33, AUROC: 81.56 AUPR_IN: 73.59, AUPR_OUT: 88.53 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Performing inference on textures dataset... +Computing metrics on textures dataset... +FPR@95: 71.89, AUROC: 73.95 AUPR_IN: 63.27, AUPR_OUT: 79.95 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Performing inference on openimageo dataset... +Computing metrics on openimageo dataset... +FPR@95: 45.78, AUROC: 76.90 AUPR_IN: 60.92, AUPR_OUT: 90.75 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Computing mean metrics... +FPR@95: 52.33, AUROC: 77.47 AUPR_IN: 65.93, AUPR_OUT: 86.41 +ACC: 77.25 +────────────────────────────────────────────────────────────────────── +Time used for eval_ood: 2829s +Completed! 
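The FPR@95 and AUROC figures reported throughout these logs follow the usual OOD-detection definitions. A minimal sketch of how such numbers can be computed from per-sample confidence scores (editor's note: an illustrative implementation on toy data, not OpenOOD's own evaluator; it assumes higher scores indicate in-distribution):

import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve

def fpr_at_95_tpr(id_scores, ood_scores):
    # ID samples form the positive class
    labels = np.concatenate([np.ones_like(id_scores),
                             np.zeros_like(ood_scores)])
    scores = np.concatenate([id_scores, ood_scores])
    fpr, tpr, _ = roc_curve(labels, scores)
    # false positive rate at the first threshold whose TPR reaches 95%
    return 100.0 * fpr[np.argmax(tpr >= 0.95)]

rng = np.random.default_rng(0)
id_s, ood_s = rng.normal(1.0, 1.0, 1000), rng.normal(0.0, 1.0, 1000)
labels = np.concatenate([np.ones(1000), np.zeros(1000)])
print('FPR@95: {:.2f}, AUROC: {:.2f}'.format(
    fpr_at_95_tpr(id_s, ood_s),
    100.0 * roc_auc_score(labels, np.concatenate([id_s, ood_s]))))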
diff --git a/OpenOOD/output_OOD_post_method.txt b/OpenOOD/output_OOD_post_method.txt new file mode 100644 index 0000000000000000000000000000000000000000..aff8ea4990368e8cac2fd9fc382ffa1a11902070 --- /dev/null +++ b/OpenOOD/output_OOD_post_method.txt @@ -0,0 +1,1050 @@ +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 68.97, AUROC: 71.35 AUPR_IN: 13.53, AUPR_OUT: 97.54 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 70.03, AUROC: 69.85 AUPR_IN: 17.51, AUPR_OUT: 96.16 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 73.41, AUROC: 69.93 AUPR_IN: 19.26, AUPR_OUT: 96.32 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 70.00, AUROC: 72.64 AUPR_IN: 21.46, AUPR_OUT: 96.73 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 89.84, AUROC: 62.33 AUPR_IN: 90.49, AUPR_OUT: 21.62 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 74.45, AUROC: 69.22 AUPR_IN: 32.45, AUPR_OUT: 81.67 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 55.50, AUROC: 79.80 AUPR_IN: 34.14, AUPR_OUT: 97.12 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 64.86, AUROC: 76.79 AUPR_IN: 66.04, AUPR_OUT: 80.93 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 60.18, AUROC: 78.30 AUPR_IN: 50.09, AUPR_OUT: 89.02 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 65.24, AUROC: 69.78 AUPR_IN: 54.39, AUPR_OUT: 84.24 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 74.60, AUROC: 71.81 AUPR_IN: 61.78, AUPR_OUT: 78.02 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 64.69, AUROC: 74.91 AUPR_IN: 46.34, AUPR_OUT: 91.40 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... 
+FPR@95: 68.18, AUROC: 72.17 AUPR_IN: 54.17, AUPR_OUT: 84.55 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 68.97 71.35 13.53 97.54 77.20 +imagenet22k_container_refine 70.03 69.85 17.51 96.16 77.20 +bronzeS_containerM 73.41 69.93 19.26 96.32 77.20 +bronzeM_containerS 70.00 72.64 21.46 96.73 77.20 +bronze_Line 89.84 62.33 90.49 21.62 77.20 +nearood 74.45 69.22 32.45 81.67 77.20 +ssb_hard 55.50 79.80 34.14 97.12 77.20 +ninco 64.86 76.79 66.04 80.93 77.20 +midood 60.18 78.30 50.09 89.02 77.20 +inaturalist 65.24 69.78 54.39 84.24 77.20 +textures 74.60 71.81 61.78 78.02 77.20 +openimage_o 64.69 74.91 46.34 91.40 77.20 +farood 68.18 72.17 54.17 84.55 77.20 +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 67.49, AUROC: 73.23 AUPR_IN: 14.91, AUPR_OUT: 98.12 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 68.55, AUROC: 72.17 AUPR_IN: 19.22, AUPR_OUT: 97.02 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 72.15, AUROC: 71.39 AUPR_IN: 21.09, AUPR_OUT: 96.96 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 68.55, AUROC: 71.57 AUPR_IN: 22.73, AUPR_OUT: 96.97 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 88.78, AUROC: 60.35 AUPR_IN: 90.35, AUPR_OUT: 17.33 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 73.11, AUROC: 69.75 AUPR_IN: 33.66, AUPR_OUT: 81.28 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 54.57, AUROC: 80.32 AUPR_IN: 35.50, AUPR_OUT: 97.99 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 63.76, AUROC: 76.17 AUPR_IN: 66.48, AUPR_OUT: 82.80 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 59.16, AUROC: 78.25 AUPR_IN: 50.99, AUPR_OUT: 90.39 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 64.02, AUROC: 69.84 AUPR_IN: 55.59, AUPR_OUT: 84.07 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 75.56, AUROC: 71.23 AUPR_IN: 60.52, AUPR_OUT: 76.26 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... 
+FPR@95: 63.73, AUROC: 74.39 AUPR_IN: 47.20, AUPR_OUT: 91.77 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 67.77, AUROC: 71.82 AUPR_IN: 54.44, AUPR_OUT: 84.03 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 67.49 73.23 14.91 98.12 77.20 +imagenet22k_container_refine 68.55 72.17 19.22 97.02 77.20 +bronzeS_containerM 72.15 71.39 21.09 96.96 77.20 +bronzeM_containerS 68.55 71.57 22.73 96.97 77.20 +bronze_Line 88.78 60.35 90.35 17.33 77.20 +nearood 73.11 69.75 33.66 81.28 77.20 +ssb_hard 54.57 80.32 35.50 97.99 77.20 +ninco 63.76 76.17 66.48 82.80 77.20 +midood 59.16 78.25 50.99 90.39 77.20 +inaturalist 64.02 69.84 55.59 84.07 77.20 +textures 75.56 71.23 60.52 76.26 77.20 +openimage_o 63.73 74.39 47.20 91.77 77.20 +farood 67.77 71.82 54.44 84.03 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 88.17, AUROC: 81.03 AUPR_IN: 10.63, AUPR_OUT: 98.97 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 88.52, AUROC: 82.01 AUPR_IN: 15.52, AUPR_OUT: 98.60 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 80.87, AUROC: 72.90 AUPR_IN: 15.47, AUPR_OUT: 97.14 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 77.52, AUROC: 81.14 AUPR_IN: 19.07, AUPR_OUT: 98.39 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 55.34, AUROC: 85.02 AUPR_IN: 96.97, AUPR_OUT: 50.47 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 78.08, AUROC: 80.42 AUPR_IN: 31.53, AUPR_OUT: 88.71 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 80.48, AUROC: 82.88 AUPR_IN: 19.58, AUPR_OUT: 98.54 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 94.50, AUROC: 74.69 AUPR_IN: 49.87, AUPR_OUT: 86.41 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 87.49, AUROC: 78.78 AUPR_IN: 34.73, AUPR_OUT: 92.48 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 79.61, AUROC: 81.23 AUPR_IN: 54.15, AUPR_OUT: 90.81 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... 
+FPR@95: 96.40, AUROC: 71.88 AUPR_IN: 49.39, AUPR_OUT: 83.23 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 87.91, AUROC: 79.27 AUPR_IN: 35.22, AUPR_OUT: 94.71 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 87.97, AUROC: 77.46 AUPR_IN: 46.25, AUPR_OUT: 89.58 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 88.17 81.03 10.63 98.97 77.20 +imagenet22k_container_refine 88.52 82.01 15.52 98.60 77.20 +bronzeS_containerM 80.87 72.90 15.47 97.14 77.20 +bronzeM_containerS 77.52 81.14 19.07 98.39 77.20 +bronze_Line 55.34 85.02 96.97 50.47 77.20 +nearood 78.08 80.42 31.53 88.71 77.20 +ssb_hard 80.48 82.88 19.58 98.54 77.20 +ninco 94.50 74.69 49.87 86.41 77.20 +midood 87.49 78.78 34.73 92.48 77.20 +inaturalist 79.61 81.23 54.15 90.81 77.20 +textures 96.40 71.88 49.39 83.23 77.20 +openimage_o 87.91 79.27 35.22 94.71 77.20 +farood 87.97 77.46 46.25 89.58 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 14.05, AUROC: 95.47 AUPR_IN: 83.16, AUPR_OUT: 99.71 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 14.95, AUROC: 95.17 AUPR_IN: 84.61, AUPR_OUT: 99.52 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 36.72, AUROC: 87.03 AUPR_IN: 61.03, AUPR_OUT: 98.52 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 22.44, AUROC: 92.20 AUPR_IN: 75.85, AUPR_OUT: 99.19 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 93.73, AUROC: 57.11 AUPR_IN: 88.52, AUPR_OUT: 15.44 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 36.38, AUROC: 85.39 AUPR_IN: 78.63, AUPR_OUT: 82.48 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 10.03, AUROC: 96.95 AUPR_IN: 90.26, AUPR_OUT: 99.69 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 7.11, AUROC: 98.11 AUPR_IN: 97.65, AUPR_OUT: 98.59 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 8.57, AUROC: 97.53 AUPR_IN: 93.95, AUPR_OUT: 99.14 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... 
+FPR@95: 1.64, AUROC: 99.48 AUPR_IN: 99.11, AUPR_OUT: 99.75 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 1.83, AUROC: 99.61 AUPR_IN: 99.47, AUPR_OUT: 99.74 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 6.88, AUROC: 98.34 AUPR_IN: 95.70, AUPR_OUT: 99.58 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 3.45, AUROC: 99.14 AUPR_IN: 98.09, AUPR_OUT: 99.69 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 14.05 95.47 83.16 99.71 77.20 +imagenet22k_container_refine 14.95 95.17 84.61 99.52 77.20 +bronzeS_containerM 36.72 87.03 61.03 98.52 77.20 +bronzeM_containerS 22.44 92.20 75.85 99.19 77.20 +bronze_Line 93.73 57.11 88.52 15.44 77.20 +nearood 36.38 85.39 78.63 82.48 77.20 +ssb_hard 10.03 96.95 90.26 99.69 77.20 +ninco 7.11 98.11 97.65 98.59 77.20 +midood 8.57 97.53 93.95 99.14 77.20 +inaturalist 1.64 99.48 99.11 99.75 77.20 +textures 1.83 99.61 99.47 99.74 77.20 +openimage_o 6.88 98.34 95.70 99.58 77.20 +farood 3.45 99.14 98.09 99.69 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 63.54, AUROC: 80.12 AUPR_IN: 16.71, AUPR_OUT: 98.76 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 64.86, AUROC: 78.95 AUPR_IN: 21.65, AUPR_OUT: 98.01 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 70.51, AUROC: 75.45 AUPR_IN: 22.02, AUPR_OUT: 97.61 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 61.41, AUROC: 80.54 AUPR_IN: 29.60, AUPR_OUT: 98.17 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 87.27, AUROC: 58.15 AUPR_IN: 89.85, AUPR_OUT: 16.04 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 69.52, AUROC: 74.64 AUPR_IN: 35.96, AUPR_OUT: 81.72 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 45.40, AUROC: 88.15 AUPR_IN: 42.52, AUPR_OUT: 98.95 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 59.26, AUROC: 82.93 AUPR_IN: 71.84, AUPR_OUT: 88.72 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... 
+FPR@95: 52.33, AUROC: 85.54 AUPR_IN: 57.18, AUPR_OUT: 93.83 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 56.91, AUROC: 74.61 AUPR_IN: 62.41, AUPR_OUT: 86.24 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 86.27, AUROC: 69.51 AUPR_IN: 54.56, AUPR_OUT: 75.68 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 60.61, AUROC: 79.88 AUPR_IN: 50.63, AUPR_OUT: 94.03 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 67.93, AUROC: 74.67 AUPR_IN: 55.87, AUPR_OUT: 85.32 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 63.54 80.12 16.71 98.76 77.20 +imagenet22k_container_refine 64.86 78.95 21.65 98.01 77.20 +bronzeS_containerM 70.51 75.45 22.02 97.61 77.20 +bronzeM_containerS 61.41 80.54 29.60 98.17 77.20 +bronze_Line 87.27 58.15 89.85 16.04 77.20 +nearood 69.52 74.64 35.96 81.72 77.20 +ssb_hard 45.40 88.15 42.52 98.95 77.20 +ninco 59.26 82.93 71.84 88.72 77.20 +midood 52.33 85.54 57.18 93.83 77.20 +inaturalist 56.91 74.61 62.41 86.24 77.20 +textures 86.27 69.51 54.56 75.68 77.20 +openimage_o 60.61 79.88 50.63 94.03 77.20 +farood 67.93 74.67 55.87 85.32 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 81.99, AUROC: 62.20 AUPR_IN: 7.57, AUPR_OUT: 97.73 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 78.52, AUROC: 68.92 AUPR_IN: 13.58, AUPR_OUT: 97.42 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 95.24, AUROC: 50.25 AUPR_IN: 5.97, AUPR_OUT: 94.46 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 87.20, AUROC: 61.75 AUPR_IN: 8.86, AUPR_OUT: 96.40 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 83.83, AUROC: 28.37 AUPR_IN: 83.01, AUPR_OUT: 9.17 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 85.36, AUROC: 54.30 AUPR_IN: 23.80, AUPR_OUT: 79.04 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 80.13, AUROC: 77.78 AUPR_IN: 16.85, AUPR_OUT: 98.27 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... 
+FPR@95: 87.14, AUROC: 68.33 AUPR_IN: 48.57, AUPR_OUT: 83.09 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 83.63, AUROC: 73.06 AUPR_IN: 32.71, AUPR_OUT: 90.68 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 74.95, AUROC: 65.27 AUPR_IN: 40.48, AUPR_OUT: 85.72 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 83.76, AUROC: 69.40 AUPR_IN: 51.16, AUPR_OUT: 83.36 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 85.69, AUROC: 64.68 AUPR_IN: 25.08, AUPR_OUT: 91.09 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 81.47, AUROC: 66.45 AUPR_IN: 38.90, AUPR_OUT: 86.73 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 81.99 62.20 7.57 97.73 77.20 +imagenet22k_container_refine 78.52 68.92 13.58 97.42 77.20 +bronzeS_containerM 95.24 50.25 5.97 94.46 77.20 +bronzeM_containerS 87.20 61.75 8.86 96.40 77.20 +bronze_Line 83.83 28.37 83.01 9.17 77.20 +nearood 85.36 54.30 23.80 79.04 77.20 +ssb_hard 80.13 77.78 16.85 98.27 77.20 +ninco 87.14 68.33 48.57 83.09 77.20 +midood 83.63 73.06 32.71 90.68 77.20 +inaturalist 74.95 65.27 40.48 85.72 77.20 +textures 83.76 69.40 51.16 83.36 77.20 +openimage_o 85.69 64.68 25.08 91.09 77.20 +farood 81.47 66.45 38.90 86.73 77.20 +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 51.77, AUROC: 85.08 AUPR_IN: 29.00, AUPR_OUT: 99.10 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 53.92, AUROC: 84.05 AUPR_IN: 34.93, AUPR_OUT: 98.54 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 64.02, AUROC: 77.46 AUPR_IN: 28.34, AUPR_OUT: 97.78 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 65.27, AUROC: 79.56 AUPR_IN: 26.53, AUPR_OUT: 98.13 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 89.16, AUROC: 57.53 AUPR_IN: 89.57, AUPR_OUT: 16.48 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 64.83, AUROC: 76.73 AUPR_IN: 41.67, AUPR_OUT: 82.01 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... 
+FPR@95: 38.78, AUROC: 90.71 AUPR_IN: 53.77, AUPR_OUT: 99.21 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 52.93, AUROC: 84.63 AUPR_IN: 77.01, AUPR_OUT: 89.76 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 45.85, AUROC: 87.67 AUPR_IN: 65.39, AUPR_OUT: 94.48 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 52.86, AUROC: 77.49 AUPR_IN: 65.50, AUPR_OUT: 87.56 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 85.72, AUROC: 67.14 AUPR_IN: 53.34, AUPR_OUT: 73.92 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 57.23, AUROC: 81.31 AUPR_IN: 55.71, AUPR_OUT: 94.55 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 65.27, AUROC: 75.31 AUPR_IN: 58.18, AUPR_OUT: 85.34 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 51.77 85.08 29.00 99.10 77.20 +imagenet22k_container_refine 53.92 84.05 34.93 98.54 77.20 +bronzeS_containerM 64.02 77.46 28.34 97.78 77.20 +bronzeM_containerS 65.27 79.56 26.53 98.13 77.20 +bronze_Line 89.16 57.53 89.57 16.48 77.20 +nearood 64.83 76.73 41.67 82.01 77.20 +ssb_hard 38.78 90.71 53.77 99.21 77.20 +ninco 52.93 84.63 77.01 89.76 77.20 +midood 45.85 87.67 65.39 94.48 77.20 +inaturalist 52.86 77.49 65.50 87.56 77.20 +textures 85.72 67.14 53.34 73.92 77.20 +openimage_o 57.23 81.31 55.71 94.55 77.20 +farood 65.27 75.31 58.18 85.34 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 64.08, AUROC: 79.69 AUPR_IN: 16.75, AUPR_OUT: 98.71 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 65.18, AUROC: 78.66 AUPR_IN: 21.71, AUPR_OUT: 97.95 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 64.89, AUROC: 77.34 AUPR_IN: 25.46, AUPR_OUT: 97.81 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 63.76, AUROC: 78.80 AUPR_IN: 26.64, AUPR_OUT: 97.94 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 87.30, AUROC: 54.94 AUPR_IN: 89.28, AUPR_OUT: 14.41 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... 
+FPR@95: 69.04, AUROC: 73.89 AUPR_IN: 35.97, AUPR_OUT: 81.36 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 47.52, AUROC: 87.17 AUPR_IN: 41.59, AUPR_OUT: 98.84 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 60.55, AUROC: 81.34 AUPR_IN: 70.79, AUPR_OUT: 87.21 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 54.04, AUROC: 84.26 AUPR_IN: 56.19, AUPR_OUT: 93.03 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 58.97, AUROC: 72.51 AUPR_IN: 60.65, AUPR_OUT: 84.46 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 87.78, AUROC: 68.41 AUPR_IN: 53.56, AUPR_OUT: 74.51 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 61.32, AUROC: 78.42 AUPR_IN: 49.15, AUPR_OUT: 93.45 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 69.36, AUROC: 73.11 AUPR_IN: 54.45, AUPR_OUT: 84.14 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 64.08 79.69 16.75 98.71 77.20 +imagenet22k_container_refine 65.18 78.66 21.71 97.95 77.20 +bronzeS_containerM 64.89 77.34 25.46 97.81 77.20 +bronzeM_containerS 63.76 78.80 26.64 97.94 77.20 +bronze_Line 87.30 54.94 89.28 14.41 77.20 +nearood 69.04 73.89 35.97 81.36 77.20 +ssb_hard 47.52 87.17 41.59 98.84 77.20 +ninco 60.55 81.34 70.79 87.21 77.20 +midood 54.04 84.26 56.19 93.03 77.20 +inaturalist 58.97 72.51 60.65 84.46 77.20 +textures 87.78 68.41 53.56 74.51 77.20 +openimage_o 61.32 78.42 49.15 93.45 77.20 +farood 69.36 73.11 54.45 84.14 77.20 +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 63.54, AUROC: 79.82 AUPR_IN: 16.63, AUPR_OUT: 98.73 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 65.02, AUROC: 78.66 AUPR_IN: 21.55, AUPR_OUT: 97.95 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 70.51, AUROC: 75.29 AUPR_IN: 21.97, AUPR_OUT: 97.58 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 61.48, AUROC: 80.14 AUPR_IN: 29.46, AUPR_OUT: 98.10 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... 
+FPR@95: 87.27, AUROC: 58.24 AUPR_IN: 89.86, AUPR_OUT: 16.13 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 69.56, AUROC: 74.43 AUPR_IN: 35.89, AUPR_OUT: 81.70 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 45.66, AUROC: 87.82 AUPR_IN: 42.28, AUPR_OUT: 98.89 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 59.42, AUROC: 82.64 AUPR_IN: 71.63, AUPR_OUT: 88.38 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 52.54, AUROC: 85.23 AUPR_IN: 56.96, AUPR_OUT: 93.64 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 56.91, AUROC: 74.43 AUPR_IN: 62.26, AUPR_OUT: 86.13 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 86.27, AUROC: 69.61 AUPR_IN: 54.64, AUPR_OUT: 75.81 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 60.68, AUROC: 79.67 AUPR_IN: 50.49, AUPR_OUT: 93.91 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 67.95, AUROC: 74.57 AUPR_IN: 55.80, AUPR_OUT: 85.28 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 63.54 79.82 16.63 98.73 77.20 +imagenet22k_container_refine 65.02 78.66 21.55 97.95 77.20 +bronzeS_containerM 70.51 75.29 21.97 97.58 77.20 +bronzeM_containerS 61.48 80.14 29.46 98.10 77.20 +bronze_Line 87.27 58.24 89.86 16.13 77.20 +nearood 69.56 74.43 35.89 81.70 77.20 +ssb_hard 45.66 87.82 42.28 98.89 77.20 +ninco 59.42 82.64 71.63 88.38 77.20 +midood 52.54 85.23 56.96 93.64 77.20 +inaturalist 56.91 74.43 62.26 86.13 77.20 +textures 86.27 69.61 54.64 75.81 77.20 +openimage_o 60.68 79.67 50.49 93.91 77.20 +farood 67.95 74.57 55.80 85.28 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 97.20, AUROC: 52.56 AUPR_IN: 4.08, AUPR_OUT: 97.08 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 97.43, AUROC: 50.18 AUPR_IN: 5.66, AUPR_OUT: 95.38 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 95.47, AUROC: 60.14 AUPR_IN: 7.58, AUPR_OUT: 96.11 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... 
+FPR@95: 96.17, AUROC: 52.77 AUPR_IN: 6.36, AUPR_OUT: 95.77 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 95.88, AUROC: 43.43 AUPR_IN: 83.44, AUPR_OUT: 14.44 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 96.43, AUROC: 51.82 AUPR_IN: 21.42, AUPR_OUT: 79.76 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 96.33, AUROC: 65.96 AUPR_IN: 8.56, AUPR_OUT: 97.03 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 96.30, AUROC: 60.98 AUPR_IN: 39.08, AUPR_OUT: 78.53 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 96.32, AUROC: 63.47 AUPR_IN: 23.82, AUPR_OUT: 87.78 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 97.75, AUROC: 41.62 AUPR_IN: 19.42, AUPR_OUT: 76.78 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 96.66, AUROC: 54.15 AUPR_IN: 38.27, AUPR_OUT: 70.13 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 96.72, AUROC: 55.56 AUPR_IN: 17.09, AUPR_OUT: 88.54 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 97.04, AUROC: 50.44 AUPR_IN: 24.93, AUPR_OUT: 78.49 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 97.20 52.56 4.08 97.08 77.20 +imagenet22k_container_refine 97.43 50.18 5.66 95.38 77.20 +bronzeS_containerM 95.47 60.14 7.58 96.11 77.20 +bronzeM_containerS 96.17 52.77 6.36 95.77 77.20 +bronze_Line 95.88 43.43 83.44 14.44 77.20 +nearood 96.43 51.82 21.42 79.76 77.20 +ssb_hard 96.33 65.96 8.56 97.03 77.20 +ninco 96.30 60.98 39.08 78.53 77.20 +midood 96.32 63.47 23.82 87.78 77.20 +inaturalist 97.75 41.62 19.42 76.78 77.20 +textures 96.66 54.15 38.27 70.13 77.20 +openimage_o 96.72 55.56 17.09 88.54 77.20 +farood 97.04 50.44 24.93 78.49 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 16.17, AUROC: 96.69 AUPR_IN: 74.38, AUPR_OUT: 99.84 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 17.94, AUROC: 96.30 AUPR_IN: 77.25, AUPR_OUT: 99.73 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... 
+FPR@95: 38.78, AUROC: 90.60 AUPR_IN: 58.22, AUPR_OUT: 99.24 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 22.77, AUROC: 94.89 AUPR_IN: 73.51, AUPR_OUT: 99.61 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 86.43, AUROC: 55.05 AUPR_IN: 89.37, AUPR_OUT: 14.99 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 36.42, AUROC: 86.70 AUPR_IN: 74.55, AUPR_OUT: 82.68 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 4.47, AUROC: 98.97 AUPR_IN: 91.72, AUPR_OUT: 99.93 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 4.47, AUROC: 98.96 AUPR_IN: 98.15, AUPR_OUT: 99.45 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 4.47, AUROC: 98.97 AUPR_IN: 94.93, AUPR_OUT: 99.69 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 1.41, AUROC: 99.72 AUPR_IN: 99.29, AUPR_OUT: 99.91 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 1.29, AUROC: 99.59 AUPR_IN: 99.27, AUPR_OUT: 99.77 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 4.50, AUROC: 99.03 AUPR_IN: 96.09, AUPR_OUT: 99.80 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 2.40, AUROC: 99.45 AUPR_IN: 98.22, AUPR_OUT: 99.83 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 16.17 96.69 74.38 99.84 77.20 +imagenet22k_container_refine 17.94 96.30 77.25 99.73 77.20 +bronzeS_containerM 38.78 90.60 58.22 99.24 77.20 +bronzeM_containerS 22.77 94.89 73.51 99.61 77.20 +bronze_Line 86.43 55.05 89.37 14.99 77.20 +nearood 36.42 86.70 74.55 82.68 77.20 +ssb_hard 4.47 98.97 91.72 99.93 77.20 +ninco 4.47 98.96 98.15 99.45 77.20 +midood 4.47 98.97 94.93 99.69 77.20 +inaturalist 1.41 99.72 99.29 99.91 77.20 +textures 1.29 99.59 99.27 99.77 77.20 +openimage_o 4.50 99.03 96.09 99.80 77.20 +farood 2.40 99.45 98.22 99.83 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... +FPR@95: 38.71, AUROC: 90.42 AUPR_IN: 50.58, AUPR_OUT: 99.49 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... 
+FPR@95: 40.93, AUROC: 89.39 AUPR_IN: 54.14, AUPR_OUT: 99.13 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 60.06, AUROC: 81.07 AUPR_IN: 34.09, AUPR_OUT: 98.29 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 45.40, AUROC: 86.94 AUPR_IN: 48.86, AUPR_OUT: 98.86 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 89.26, AUROC: 58.54 AUPR_IN: 89.21, AUPR_OUT: 16.16 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 54.87, AUROC: 81.27 AUPR_IN: 55.38, AUPR_OUT: 82.39 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 17.81, AUROC: 95.98 AUPR_IN: 77.50, AUPR_OUT: 99.68 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 22.09, AUROC: 94.61 AUPR_IN: 92.20, AUPR_OUT: 96.56 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 19.95, AUROC: 95.29 AUPR_IN: 84.85, AUPR_OUT: 98.12 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 18.65, AUROC: 94.10 AUPR_IN: 90.00, AUPR_OUT: 97.20 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 28.04, AUROC: 93.03 AUPR_IN: 91.10, AUPR_OUT: 94.84 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 22.35, AUROC: 94.56 AUPR_IN: 84.94, AUPR_OUT: 98.62 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 23.01, AUROC: 93.90 AUPR_IN: 88.68, AUPR_OUT: 96.89 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 38.71 90.42 50.58 99.49 77.20 +imagenet22k_container_refine 40.93 89.39 54.14 99.13 77.20 +bronzeS_containerM 60.06 81.07 34.09 98.29 77.20 +bronzeM_containerS 45.40 86.94 48.86 98.86 77.20 +bronze_Line 89.26 58.54 89.21 16.16 77.20 +nearood 54.87 81.27 55.38 82.39 77.20 +ssb_hard 17.81 95.98 77.50 99.68 77.20 +ninco 22.09 94.61 92.20 96.56 77.20 +midood 19.95 95.29 84.85 98.12 77.20 +inaturalist 18.65 94.10 90.00 97.20 77.20 +textures 28.04 93.03 91.10 94.84 77.20 +openimage_o 22.35 94.56 84.94 98.62 77.20 +farood 23.01 93.90 88.68 96.89 77.20 +Postprocessor_name is ignored because postprocessor is passed +Loaded pre-computed scores from file. +Processing near ood... +Inference has been performed on imagenet22k_container dataset... +Computing metrics on imagenet22k_container dataset... 
+FPR@95: 86.91, AUROC: 53.58 AUPR_IN: 7.04, AUPR_OUT: 95.30 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on imagenet22k_container_refine dataset... +Computing metrics on imagenet22k_container_refine dataset... +FPR@95: 88.68, AUROC: 52.48 AUPR_IN: 8.97, AUPR_OUT: 93.09 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeS_containerM dataset... +Computing metrics on bronzeS_containerM dataset... +FPR@95: 75.11, AUROC: 67.79 AUPR_IN: 19.01, AUPR_OUT: 95.86 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronzeM_containerS dataset... +Computing metrics on bronzeM_containerS dataset... +FPR@95: 94.15, AUROC: 38.35 AUPR_IN: 5.14, AUPR_OUT: 90.31 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on bronze_Line dataset... +Computing metrics on bronze_Line dataset... +FPR@95: 98.39, AUROC: 26.74 AUPR_IN: 77.97, AUPR_OUT: 8.75 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 88.65, AUROC: 47.79 AUPR_IN: 23.62, AUPR_OUT: 76.66 +────────────────────────────────────────────────────────────────────── + +Processing mid ood... +Performing inference on ssb_hard dataset... +Computing metrics on ssb_hard dataset... +FPR@95: 84.24, AUROC: 57.12 AUPR_IN: 12.10, AUPR_OUT: 93.39 +────────────────────────────────────────────────────────────────────── + +Performing inference on ninco dataset... +Computing metrics on ninco dataset... +FPR@95: 90.93, AUROC: 44.59 AUPR_IN: 36.40, AUPR_OUT: 58.81 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... +FPR@95: 87.59, AUROC: 50.85 AUPR_IN: 24.25, AUPR_OUT: 76.10 +────────────────────────────────────────────────────────────────────── + +Processing far ood... +Inference has been performed on inaturalist dataset... +Computing metrics on inaturalist dataset... +FPR@95: 93.41, AUROC: 31.69 AUPR_IN: 20.15, AUPR_OUT: 64.10 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on textures dataset... +Computing metrics on textures dataset... +FPR@95: 92.60, AUROC: 45.89 AUPR_IN: 38.15, AUPR_OUT: 56.40 +────────────────────────────────────────────────────────────────────── + +Inference has been performed on openimage_o dataset... +Computing metrics on openimage_o dataset... +FPR@95: 93.15, AUROC: 41.30 AUPR_IN: 15.63, AUPR_OUT: 77.25 +────────────────────────────────────────────────────────────────────── + +Computing mean metrics... 
+FPR@95: 93.05, AUROC: 39.63 AUPR_IN: 24.64, AUPR_OUT: 65.92 +────────────────────────────────────────────────────────────────────── + + FPR@95 AUROC AUPR_IN AUPR_OUT ACC +imagenet22k_container 86.91 53.58 7.04 95.30 77.20 +imagenet22k_container_refine 88.68 52.48 8.97 93.09 77.20 +bronzeS_containerM 75.11 67.79 19.01 95.86 77.20 +bronzeM_containerS 94.15 38.35 5.14 90.31 77.20 +bronze_Line 98.39 26.74 77.97 8.75 77.20 +nearood 88.65 47.79 23.62 76.66 77.20 +ssb_hard 84.24 57.12 12.10 93.39 77.20 +ninco 90.93 44.59 36.40 58.81 77.20 +midood 87.59 50.85 24.25 76.10 77.20 +inaturalist 93.41 31.69 20.15 64.10 77.20 +textures 92.60 45.89 38.15 56.40 77.20 +openimage_o 93.15 41.30 15.63 77.25 77.20 +farood 93.05 39.63 24.64 65.92 77.20 diff --git a/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_DNet.ckpt b/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_DNet.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..bf4f84871e941e1c4af73efa17dae27af0c86cdb --- /dev/null +++ b/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_DNet.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a570bbac0c98f5d40c716f3d5f4a620a2cfdff301c4bb6ec32ac3763dabe7caf +size 1750549 diff --git a/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_GNet.ckpt b/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_GNet.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..21e7f390be34c84cf2902ffb56b8e8973a9da485 --- /dev/null +++ b/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/best_GNet.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:325bc71b075a3c70c3c6989d895dc50c36819f18a47b91e841c6cc77e6f9e026 +size 1544143 diff --git a/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/config.yml b/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..377e9ff4686a17cdcdbd4d8054e0778ddd971b9a --- /dev/null +++ b/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/config.yml @@ -0,0 +1,277 @@ +!!python/object/new:openood.utils.config.Config +state: + dataset: + name: bronze2 + num_classes: 11 + pre_size: 420 + image_size: 400 + interpolation: bilinear + normalization_type: imagenet + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: &id001 + - train + - val + - test + train: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_train.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: true + val: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_val.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: false + test: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_test.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: false + feat_root: ./results/bronze2_OursBronze2_feat_extract_opengan_default/s0 + network: + name: opengan + num_classes: 
11 + image_size: 400 + pretrained: false + checkpoint: none + num_gpus: 1 + nc: 512 + nz: 100 + ngf: 64 + ndf: 64 + backbone: + name: resnet50 + num_classes: 11 + image_size: 400 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 1 + exp_name: bronze2_opengan_opengan_e90_lr0.0001_default/s0 + output_dir: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0 + save_output: true + merge_option: default + mark: default + seed: 0 + num_gpus: 1 + num_workers: 8 + num_machines: 1 + machine_rank: 0 + pipeline: + name: train_opengan + trainer: + name: opengan + evaluator: + name: ood + optimizer: + name: Adam + num_epochs: 90 + lr: 0.0001 + beta1: 0.5 + recorder: + name: opengan + save_all_models: false + preprocessor: + name: base + postprocessor: + name: opengan + APS_mode: false +dictitems: + dataset: !!python/object/new:openood.utils.config.Config + state: + name: bronze2 + num_classes: 11 + pre_size: 420 + image_size: 400 + interpolation: bilinear + normalization_type: imagenet + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: *id001 + train: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_train.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: true + val: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_val.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: false + test: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_test.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: false + feat_root: ./results/bronze2_OursBronze2_feat_extract_opengan_default/s0 + dictitems: + name: bronze2 + num_classes: 11 + pre_size: 420 + image_size: 400 + interpolation: bilinear + normalization_type: imagenet + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: *id001 + train: !!python/object/new:openood.utils.config.Config + state: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_train.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: true + dictitems: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_train.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: true + val: !!python/object/new:openood.utils.config.Config + state: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_val.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: false + dictitems: + dataset_class: Bronze2ExcelDataset + data_dir: 
/data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_val.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: false + test: !!python/object/new:openood.utils.config.Config + state: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_test.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: false + dictitems: + dataset_class: Bronze2ExcelDataset + data_dir: /data/bronze_ID_and_OOD/bronze2NotLine/image_not_line + imglist_pth: /data/bronze_ID_and_OOD/bronze2NotLine/not_line_ding_gui_train_val_test/ding_gui_not_line_test.xlsx + xml_path: /data/bronze_ID_and_OOD/bronze2NotLine/xmls + batch_size: 128 + shuffle: false + feat_root: ./results/bronze2_OursBronze2_feat_extract_opengan_default/s0 + network: !!python/object/new:openood.utils.config.Config + state: + name: opengan + num_classes: 11 + image_size: 400 + pretrained: false + checkpoint: none + num_gpus: 1 + nc: 512 + nz: 100 + ngf: 64 + ndf: 64 + backbone: + name: resnet50 + num_classes: 11 + image_size: 400 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 1 + dictitems: + name: opengan + num_classes: 11 + image_size: 400 + pretrained: false + checkpoint: none + num_gpus: 1 + nc: 512 + nz: 100 + ngf: 64 + ndf: 64 + backbone: !!python/object/new:openood.utils.config.Config + state: + name: resnet50 + num_classes: 11 + image_size: 400 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 1 + dictitems: + name: resnet50 + num_classes: 11 + image_size: 400 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 1 + exp_name: bronze2_opengan_opengan_e90_lr0.0001_default/s0 + output_dir: /home/zhourixin/OOD_Folder/CODE/other_methods/openOOD_code/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0 + save_output: true + merge_option: default + mark: default + seed: 0 + num_gpus: 1 + num_workers: 8 + num_machines: 1 + machine_rank: 0 + pipeline: !!python/object/new:openood.utils.config.Config + state: + name: train_opengan + dictitems: + name: train_opengan + trainer: !!python/object/new:openood.utils.config.Config + state: + name: opengan + dictitems: + name: opengan + evaluator: !!python/object/new:openood.utils.config.Config + state: + name: ood + dictitems: + name: ood + optimizer: !!python/object/new:openood.utils.config.Config + state: + name: Adam + num_epochs: 90 + lr: 0.0001 + beta1: 0.5 + dictitems: + name: Adam + num_epochs: 90 + lr: 0.0001 + beta1: 0.5 + recorder: !!python/object/new:openood.utils.config.Config + state: + name: opengan + save_all_models: false + dictitems: + name: opengan + save_all_models: false + preprocessor: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base + postprocessor: !!python/object/new:openood.utils.config.Config + state: + name: opengan + APS_mode: false + dictitems: + name: opengan + APS_mode: false diff --git a/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/log.txt b/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ab2b293f22b993e3f7512a4ee4e6064ae78dc42 --- /dev/null 
+++ b/OpenOOD/results/bronze2_opengan_opengan_e90_lr0.0001_default/s0/log.txt @@ -0,0 +1,188 @@ +Loaded feature size: torch.Size([2273, 512, 1, 1]) +Loaded feature size: torch.Size([571, 512, 1, 1]) +Loaded feature size: torch.Size([1763, 512, 1, 1]) +Model Loading resnet50 Completed! +Random Seed: 999 +Start training... +Epoch [001/090] | Time 6s | Loss_G: 0.7282 | Loss_D: 1.3538 | Val AUROC: 71.47 + +Epoch [002/090] | Time 8s | Loss_G: 1.0511 | Loss_D: 0.4525 | Val AUROC: 89.41 + +Epoch [003/090] | Time 10s | Loss_G: 1.5178 | Loss_D: 0.2670 | Val AUROC: 88.22 + +Epoch [004/090] | Time 12s | Loss_G: 2.3561 | Loss_D: 0.1244 | Val AUROC: 86.68 + +Epoch [005/090] | Time 14s | Loss_G: 3.1624 | Loss_D: 0.0612 | Val AUROC: 88.26 + +Epoch [006/090] | Time 15s | Loss_G: 3.7658 | Loss_D: 0.0321 | Val AUROC: 85.74 + +Epoch [007/090] | Time 17s | Loss_G: 4.0033 | Loss_D: 0.0228 | Val AUROC: 85.87 + +Epoch [008/090] | Time 19s | Loss_G: 5.6282 | Loss_D: 0.0134 | Val AUROC: 85.05 + +Epoch [009/090] | Time 21s | Loss_G: 6.6962 | Loss_D: 0.0094 | Val AUROC: 88.68 + +Epoch [010/090] | Time 23s | Loss_G: 6.1475 | Loss_D: 0.0086 | Val AUROC: 89.59 + +Epoch [011/090] | Time 25s | Loss_G: 8.3407 | Loss_D: 0.0663 | Val AUROC: 92.29 + +Epoch [012/090] | Time 26s | Loss_G: 7.1722 | Loss_D: 0.0363 | Val AUROC: 71.72 + +Epoch [013/090] | Time 28s | Loss_G: 7.6437 | Loss_D: 0.0114 | Val AUROC: 90.94 + +Epoch [014/090] | Time 30s | Loss_G: 6.0804 | Loss_D: 0.0120 | Val AUROC: 86.90 + +Epoch [015/090] | Time 32s | Loss_G: 10.9070 | Loss_D: 0.0127 | Val AUROC: 86.18 + +Epoch [016/090] | Time 33s | Loss_G: 7.4531 | Loss_D: 0.0389 | Val AUROC: 87.11 + +Epoch [017/090] | Time 35s | Loss_G: 4.9278 | Loss_D: 0.2941 | Val AUROC: 87.61 + +Epoch [018/090] | Time 37s | Loss_G: 4.8428 | Loss_D: 0.4727 | Val AUROC: 87.12 + +Epoch [019/090] | Time 39s | Loss_G: 2.3572 | Loss_D: 0.5454 | Val AUROC: 84.66 + +Epoch [020/090] | Time 41s | Loss_G: 3.3959 | Loss_D: 0.4151 | Val AUROC: 82.49 + +Epoch [021/090] | Time 42s | Loss_G: 3.9449 | Loss_D: 0.3556 | Val AUROC: 80.81 + +Epoch [022/090] | Time 44s | Loss_G: 3.6945 | Loss_D: 0.2338 | Val AUROC: 78.99 + +Epoch [023/090] | Time 46s | Loss_G: 2.2174 | Loss_D: 0.3393 | Val AUROC: 80.34 + +Epoch [024/090] | Time 48s | Loss_G: 2.5923 | Loss_D: 0.3211 | Val AUROC: 80.84 + +Epoch [025/090] | Time 50s | Loss_G: 3.9246 | Loss_D: 0.2152 | Val AUROC: 80.71 + +Epoch [026/090] | Time 51s | Loss_G: 5.1947 | Loss_D: 0.3392 | Val AUROC: 80.47 + +Epoch [027/090] | Time 53s | Loss_G: 3.3834 | Loss_D: 0.2533 | Val AUROC: 80.02 + +Epoch [028/090] | Time 55s | Loss_G: 2.2813 | Loss_D: 0.3239 | Val AUROC: 78.83 + +Epoch [029/090] | Time 57s | Loss_G: 2.2574 | Loss_D: 0.3699 | Val AUROC: 82.47 + +Epoch [030/090] | Time 59s | Loss_G: 4.5995 | Loss_D: 0.2505 | Val AUROC: 80.28 + +Epoch [031/090] | Time 61s | Loss_G: 3.2052 | Loss_D: 0.2266 | Val AUROC: 80.42 + +Epoch [032/090] | Time 62s | Loss_G: 6.3002 | Loss_D: 0.2013 | Val AUROC: 80.17 + +Epoch [033/090] | Time 64s | Loss_G: 1.6697 | Loss_D: 0.1610 | Val AUROC: 79.90 + +Epoch [034/090] | Time 66s | Loss_G: 3.8676 | Loss_D: 0.0733 | Val AUROC: 79.92 + +Epoch [035/090] | Time 67s | Loss_G: 4.7959 | Loss_D: 0.2381 | Val AUROC: 79.24 + +Epoch [036/090] | Time 69s | Loss_G: 5.1420 | Loss_D: 0.1722 | Val AUROC: 79.11 + +Epoch [037/090] | Time 71s | Loss_G: 3.3933 | Loss_D: 0.9018 | Val AUROC: 79.45 + +Epoch [038/090] | Time 73s | Loss_G: 4.8425 | Loss_D: 0.3684 | Val AUROC: 77.39 + +Epoch [039/090] | Time 74s | Loss_G: 3.4949 | Loss_D: 0.2009 | Val AUROC: 
80.20 + +Epoch [040/090] | Time 76s | Loss_G: 5.3049 | Loss_D: 0.4047 | Val AUROC: 76.59 + +Epoch [041/090] | Time 78s | Loss_G: 4.5496 | Loss_D: 0.1521 | Val AUROC: 73.18 + +Epoch [042/090] | Time 80s | Loss_G: 3.4166 | Loss_D: 0.2172 | Val AUROC: 81.51 + +Epoch [043/090] | Time 81s | Loss_G: 2.5058 | Loss_D: 0.4549 | Val AUROC: 76.61 + +Epoch [044/090] | Time 83s | Loss_G: 4.3001 | Loss_D: 0.3740 | Val AUROC: 79.09 + +Epoch [045/090] | Time 85s | Loss_G: 3.3792 | Loss_D: 0.3470 | Val AUROC: 76.87 + +Epoch [046/090] | Time 87s | Loss_G: 5.3554 | Loss_D: 0.1864 | Val AUROC: 74.75 + +Epoch [047/090] | Time 88s | Loss_G: 7.2390 | Loss_D: 0.4205 | Val AUROC: 74.59 + +Epoch [048/090] | Time 90s | Loss_G: 7.8204 | Loss_D: 0.4623 | Val AUROC: 78.99 + +Epoch [049/090] | Time 92s | Loss_G: 4.2358 | Loss_D: 0.2095 | Val AUROC: 75.79 + +Epoch [050/090] | Time 94s | Loss_G: 6.4633 | Loss_D: 0.3409 | Val AUROC: 67.03 + +Epoch [051/090] | Time 95s | Loss_G: 4.3612 | Loss_D: 0.1870 | Val AUROC: 83.25 + +Epoch [052/090] | Time 97s | Loss_G: 4.0391 | Loss_D: 0.1380 | Val AUROC: 79.26 + +Epoch [053/090] | Time 99s | Loss_G: 2.1782 | Loss_D: 0.2517 | Val AUROC: 75.96 + +Epoch [054/090] | Time 101s | Loss_G: 2.7877 | Loss_D: 0.4837 | Val AUROC: 77.09 + +Epoch [055/090] | Time 102s | Loss_G: 2.8852 | Loss_D: 0.2837 | Val AUROC: 78.22 + +Epoch [056/090] | Time 104s | Loss_G: 3.6794 | Loss_D: 0.1560 | Val AUROC: 78.02 + +Epoch [057/090] | Time 106s | Loss_G: 5.1773 | Loss_D: 0.3692 | Val AUROC: 76.72 + +Epoch [058/090] | Time 108s | Loss_G: 5.7377 | Loss_D: 0.2169 | Val AUROC: 76.59 + +Epoch [059/090] | Time 109s | Loss_G: 3.5491 | Loss_D: 0.2842 | Val AUROC: 77.48 + +Epoch [060/090] | Time 111s | Loss_G: 5.3507 | Loss_D: 0.1919 | Val AUROC: 71.10 + +Epoch [061/090] | Time 113s | Loss_G: 3.1684 | Loss_D: 0.2539 | Val AUROC: 80.44 + +Epoch [062/090] | Time 115s | Loss_G: 5.4074 | Loss_D: 0.8738 | Val AUROC: 73.14 + +Epoch [063/090] | Time 117s | Loss_G: 2.4067 | Loss_D: 0.6037 | Val AUROC: 78.92 + +Epoch [064/090] | Time 118s | Loss_G: 3.3879 | Loss_D: 0.2313 | Val AUROC: 75.97 + +Epoch [065/090] | Time 120s | Loss_G: 3.4366 | Loss_D: 0.3502 | Val AUROC: 75.96 + +Epoch [066/090] | Time 122s | Loss_G: 3.9607 | Loss_D: 0.1833 | Val AUROC: 75.55 + +Epoch [067/090] | Time 124s | Loss_G: 3.6740 | Loss_D: 0.1531 | Val AUROC: 69.95 + +Epoch [068/090] | Time 126s | Loss_G: 3.8609 | Loss_D: 0.1678 | Val AUROC: 72.80 + +Epoch [069/090] | Time 128s | Loss_G: 7.9624 | Loss_D: 0.2586 | Val AUROC: 71.05 + +Epoch [070/090] | Time 129s | Loss_G: 5.0265 | Loss_D: 0.2056 | Val AUROC: 67.53 + +Epoch [071/090] | Time 131s | Loss_G: 4.2059 | Loss_D: 0.1759 | Val AUROC: 76.23 + +Epoch [072/090] | Time 133s | Loss_G: 7.2145 | Loss_D: 0.4731 | Val AUROC: 74.51 + +Epoch [073/090] | Time 135s | Loss_G: 11.8886 | Loss_D: 0.7915 | Val AUROC: 68.29 + +Epoch [074/090] | Time 137s | Loss_G: 9.1990 | Loss_D: 1.0014 | Val AUROC: 66.74 + +Epoch [075/090] | Time 139s | Loss_G: 3.0742 | Loss_D: 0.3401 | Val AUROC: 69.91 + +Epoch [076/090] | Time 140s | Loss_G: 3.9252 | Loss_D: 0.2995 | Val AUROC: 79.80 + +Epoch [077/090] | Time 142s | Loss_G: 3.8950 | Loss_D: 0.3188 | Val AUROC: 70.34 + +Epoch [078/090] | Time 144s | Loss_G: 2.9045 | Loss_D: 0.3413 | Val AUROC: 65.57 + +Epoch [079/090] | Time 146s | Loss_G: 3.2079 | Loss_D: 0.2466 | Val AUROC: 68.93 + +Epoch [080/090] | Time 148s | Loss_G: 4.0387 | Loss_D: 0.1131 | Val AUROC: 67.70 + +Epoch [081/090] | Time 149s | Loss_G: 4.5834 | Loss_D: 0.1492 | Val AUROC: 65.94 + +Epoch [082/090] | Time 151s | 
Loss_G: 4.2651 | Loss_D: 0.1533 | Val AUROC: 67.56 + +Epoch [083/090] | Time 153s | Loss_G: 5.0847 | Loss_D: 0.1060 | Val AUROC: 65.00 + +Epoch [084/090] | Time 155s | Loss_G: 6.3769 | Loss_D: 0.3917 | Val AUROC: 64.11 + +Epoch [085/090] | Time 156s | Loss_G: 4.8690 | Loss_D: 0.1739 | Val AUROC: 71.40 + +Epoch [086/090] | Time 158s | Loss_G: 5.7117 | Loss_D: 0.1763 | Val AUROC: 66.51 + +Epoch [087/090] | Time 160s | Loss_G: 4.8352 | Loss_D: 0.1934 | Val AUROC: 68.76 + +Epoch [088/090] | Time 161s | Loss_G: 4.4518 | Loss_D: 0.1593 | Val AUROC: 65.50 + +Epoch [089/090] | Time 163s | Loss_G: 5.1243 | Loss_D: 0.2094 | Val AUROC: 73.06 + +Epoch [090/090] | Time 165s | Loss_G: 4.1075 | Loss_D: 0.0743 | Val AUROC: 67.35 + +Training Completed! Best val AUROC on netD: 92.288360 at epoch 11 +Completed! diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/best.ckpt b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/best.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..bbd6bd32e4f454ebef6411d54c457ba6b31fdc1e --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/best.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9c88b3f3c24aa51c500db09bff36661dd6aa317c125f520fbd5792c56c606dd +size 44951833 diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/best_epoch100_acc0.7710.ckpt b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/best_epoch100_acc0.7710.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..b458622301af1e992f533ec364a7659cbc9c6369 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/best_epoch100_acc0.7710.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:262d455cf3db2ed429330608583f43b21981a78f0c529187d7f987d4dd2ce5fd +size 44960781 diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/config.yml b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..8f104446a414fc08799167d2592ee7a5ba664a21 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/config.yml @@ -0,0 +1,211 @@ +!!python/object/new:openood.utils.config.Config +state: + dataset: + name: cifar100 + num_classes: 100 + image_size: 32 + pre_size: 32 + interpolation: bilinear + normalization_type: cifar100 + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: &id001 + - train + - val + - test + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: true + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: false + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: false + network: + name: resnet18_32x32 + num_classes: 100 + pretrained: false + checkpoint: ./results/cifar10_double_label_resnet18_32x32_mos_e100_lr0.003/best.ckpt + num_gpus: 1 + exp_name: cifar100_resnet18_32x32_base_e100_lr0.1_default/s1 + output_dir: ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1 + save_output: true + merge_option: default + mark: default + seed: 1 + num_gpus: 1 + num_workers: 8 + 
num_machines: 1 + machine_rank: 0 + preprocessor: + name: base + pipeline: + name: train + trainer: + name: base + evaluator: + name: base + optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + recorder: + name: base + save_all_models: false +dictitems: + dataset: !!python/object/new:openood.utils.config.Config + state: + name: cifar100 + num_classes: 100 + image_size: 32 + pre_size: 32 + interpolation: bilinear + normalization_type: cifar100 + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: *id001 + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: true + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: false + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: false + dictitems: + name: cifar100 + num_classes: 100 + image_size: 32 + pre_size: 32 + interpolation: bilinear + normalization_type: cifar100 + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: *id001 + train: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: true + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: true + val: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: false + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: false + test: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: false + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: false + network: !!python/object/new:openood.utils.config.Config + state: + name: resnet18_32x32 + num_classes: 100 + pretrained: false + checkpoint: ./results/cifar10_double_label_resnet18_32x32_mos_e100_lr0.003/best.ckpt + num_gpus: 1 + dictitems: + name: resnet18_32x32 + num_classes: 100 + pretrained: false + checkpoint: ./results/cifar10_double_label_resnet18_32x32_mos_e100_lr0.003/best.ckpt + num_gpus: 1 + exp_name: cifar100_resnet18_32x32_base_e100_lr0.1_default/s1 + output_dir: ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1 + save_output: true + merge_option: default + mark: default + seed: 1 + num_gpus: 1 + num_workers: 8 + num_machines: 1 + machine_rank: 0 + preprocessor: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base + pipeline: !!python/object/new:openood.utils.config.Config + state: + name: train + dictitems: + name: train + trainer: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base + 
evaluator: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base + optimizer: !!python/object/new:openood.utils.config.Config + state: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + dictitems: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + recorder: !!python/object/new:openood.utils.config.Config + state: + name: base + save_all_models: false + dictitems: + name: base + save_all_models: false diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/last_epoch100_acc0.7710.ckpt b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/last_epoch100_acc0.7710.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..f38ea2f4d58cd608dad8e8cbd7505be77f12c760 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/last_epoch100_acc0.7710.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51e15661a5437a53ab92f2e1f092d96ff5e00c895f743c3db88dddc22452c47e +size 44892160 diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/log.txt b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..5eab20e6158acbb150cbe6a485badb1c094a8913 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/log.txt @@ -0,0 +1,207 @@ +Start training... + +Epoch 001 | Time 20s | Train Loss 3.5140 | Val Loss 3.485 | Val Acc 16.20 + +Epoch 002 | Time 38s | Train Loss 3.0562 | Val Loss 2.955 | Val Acc 25.50 + +Epoch 003 | Time 56s | Train Loss 2.3919 | Val Loss 2.371 | Val Acc 38.00 + +Epoch 004 | Time 75s | Train Loss 2.0953 | Val Loss 2.265 | Val Acc 41.80 + +Epoch 005 | Time 94s | Train Loss 1.9038 | Val Loss 1.965 | Val Acc 46.20 + +Epoch 006 | Time 112s | Train Loss 1.7440 | Val Loss 1.817 | Val Acc 50.40 + +Epoch 007 | Time 130s | Train Loss 1.6294 | Val Loss 1.771 | Val Acc 50.30 + +Epoch 008 | Time 148s | Train Loss 1.5901 | Val Loss 1.848 | Val Acc 51.20 + +Epoch 009 | Time 166s | Train Loss 1.4998 | Val Loss 1.774 | Val Acc 54.50 + +Epoch 010 | Time 184s | Train Loss 1.3421 | Val Loss 1.555 | Val Acc 56.10 + +Epoch 011 | Time 202s | Train Loss 1.4618 | Val Loss 1.631 | Val Acc 57.70 + +Epoch 012 | Time 220s | Train Loss 1.3887 | Val Loss 1.763 | Val Acc 54.30 + +Epoch 013 | Time 238s | Train Loss 1.3948 | Val Loss 1.555 | Val Acc 56.70 + +Epoch 014 | Time 256s | Train Loss 1.1937 | Val Loss 1.648 | Val Acc 56.20 + +Epoch 015 | Time 274s | Train Loss 1.2287 | Val Loss 1.538 | Val Acc 58.00 + +Epoch 016 | Time 292s | Train Loss 1.3518 | Val Loss 1.623 | Val Acc 56.60 + +Epoch 017 | Time 310s | Train Loss 1.2904 | Val Loss 1.473 | Val Acc 60.40 + +Epoch 018 | Time 328s | Train Loss 1.1429 | Val Loss 1.729 | Val Acc 54.90 + +Epoch 019 | Time 346s | Train Loss 1.1246 | Val Loss 1.450 | Val Acc 60.50 + +Epoch 020 | Time 364s | Train Loss 1.1826 | Val Loss 1.526 | Val Acc 60.10 + +Epoch 021 | Time 381s | Train Loss 1.1999 | Val Loss 1.601 | Val Acc 58.30 + +Epoch 022 | Time 399s | Train Loss 1.1689 | Val Loss 1.489 | Val Acc 59.60 + +Epoch 023 | Time 417s | Train Loss 1.0689 | Val Loss 1.500 | Val Acc 60.70 + +Epoch 024 | Time 435s | Train Loss 1.0879 | Val Loss 1.396 | Val Acc 61.30 + +Epoch 025 | Time 453s | Train Loss 1.0990 | Val Loss 1.537 | Val Acc 59.10 + +Epoch 026 | Time 471s | Train Loss 1.0758 | Val Loss 1.462 | Val Acc 60.90 + +Epoch 027 | Time 489s | Train 
Loss 0.9911 | Val Loss 1.442 | Val Acc 58.70 + +Epoch 028 | Time 507s | Train Loss 0.9960 | Val Loss 1.493 | Val Acc 60.00 + +Epoch 029 | Time 525s | Train Loss 1.0261 | Val Loss 1.343 | Val Acc 64.70 + +Epoch 030 | Time 543s | Train Loss 0.9885 | Val Loss 1.424 | Val Acc 63.90 + +Epoch 031 | Time 561s | Train Loss 0.9917 | Val Loss 1.573 | Val Acc 59.40 + +Epoch 032 | Time 578s | Train Loss 0.9264 | Val Loss 1.377 | Val Acc 62.50 + +Epoch 033 | Time 596s | Train Loss 1.0410 | Val Loss 1.551 | Val Acc 61.10 + +Epoch 034 | Time 614s | Train Loss 0.9814 | Val Loss 1.448 | Val Acc 60.60 + +Epoch 035 | Time 632s | Train Loss 0.9046 | Val Loss 1.490 | Val Acc 59.30 + +Epoch 036 | Time 650s | Train Loss 0.8824 | Val Loss 1.472 | Val Acc 63.00 + +Epoch 037 | Time 668s | Train Loss 0.7712 | Val Loss 1.413 | Val Acc 63.10 + +Epoch 038 | Time 686s | Train Loss 0.8543 | Val Loss 1.279 | Val Acc 65.50 + +Epoch 039 | Time 704s | Train Loss 0.8835 | Val Loss 1.431 | Val Acc 64.40 + +Epoch 040 | Time 722s | Train Loss 0.8521 | Val Loss 1.335 | Val Acc 66.30 + +Epoch 041 | Time 740s | Train Loss 0.7559 | Val Loss 1.477 | Val Acc 61.80 + +Epoch 042 | Time 758s | Train Loss 0.8050 | Val Loss 1.421 | Val Acc 62.80 + +Epoch 043 | Time 776s | Train Loss 0.7814 | Val Loss 1.510 | Val Acc 61.30 + +Epoch 044 | Time 794s | Train Loss 0.7644 | Val Loss 1.348 | Val Acc 64.60 + +Epoch 045 | Time 812s | Train Loss 0.7142 | Val Loss 1.374 | Val Acc 63.20 + +Epoch 046 | Time 831s | Train Loss 0.7160 | Val Loss 1.300 | Val Acc 66.00 + +Epoch 047 | Time 849s | Train Loss 0.7354 | Val Loss 1.409 | Val Acc 63.70 + +Epoch 048 | Time 867s | Train Loss 0.6409 | Val Loss 1.358 | Val Acc 64.70 + +Epoch 049 | Time 885s | Train Loss 0.5696 | Val Loss 1.349 | Val Acc 64.80 + +Epoch 050 | Time 903s | Train Loss 0.6403 | Val Loss 1.439 | Val Acc 61.30 + +Epoch 051 | Time 922s | Train Loss 0.5578 | Val Loss 1.365 | Val Acc 67.60 + +Epoch 052 | Time 943s | Train Loss 0.6280 | Val Loss 1.392 | Val Acc 66.40 + +Epoch 053 | Time 962s | Train Loss 0.5914 | Val Loss 1.375 | Val Acc 66.30 + +Epoch 054 | Time 983s | Train Loss 0.5869 | Val Loss 1.404 | Val Acc 66.10 + +Epoch 055 | Time 1002s | Train Loss 0.5931 | Val Loss 1.287 | Val Acc 67.20 + +Epoch 056 | Time 1023s | Train Loss 0.5942 | Val Loss 1.308 | Val Acc 69.00 + +Epoch 057 | Time 1044s | Train Loss 0.4663 | Val Loss 1.272 | Val Acc 69.00 + +Epoch 058 | Time 1064s | Train Loss 0.4086 | Val Loss 1.261 | Val Acc 68.90 + +Epoch 059 | Time 1085s | Train Loss 0.4628 | Val Loss 1.339 | Val Acc 67.50 + +Epoch 060 | Time 1105s | Train Loss 0.4601 | Val Loss 1.194 | Val Acc 70.50 + +Epoch 061 | Time 1126s | Train Loss 0.4049 | Val Loss 1.235 | Val Acc 69.10 + +Epoch 062 | Time 1146s | Train Loss 0.3628 | Val Loss 1.308 | Val Acc 70.30 + +Epoch 063 | Time 1167s | Train Loss 0.3093 | Val Loss 1.264 | Val Acc 68.10 + +Epoch 064 | Time 1188s | Train Loss 0.2632 | Val Loss 1.343 | Val Acc 67.70 + +Epoch 065 | Time 1207s | Train Loss 0.2745 | Val Loss 1.233 | Val Acc 68.20 + +Epoch 066 | Time 1229s | Train Loss 0.2934 | Val Loss 1.243 | Val Acc 71.20 + +Epoch 067 | Time 1250s | Train Loss 0.2616 | Val Loss 1.180 | Val Acc 72.40 + +Epoch 068 | Time 1270s | Train Loss 0.1713 | Val Loss 1.226 | Val Acc 70.90 + +Epoch 069 | Time 1291s | Train Loss 0.1922 | Val Loss 1.161 | Val Acc 72.90 + +Epoch 070 | Time 1312s | Train Loss 0.1354 | Val Loss 1.174 | Val Acc 72.10 + +Epoch 071 | Time 1333s | Train Loss 0.1133 | Val Loss 1.109 | Val Acc 73.00 + +Epoch 072 | Time 1354s | Train Loss 0.1134 | Val Loss 
1.143 | Val Acc 73.40 + +Epoch 073 | Time 1375s | Train Loss 0.0772 | Val Loss 1.122 | Val Acc 73.70 + +Epoch 074 | Time 1397s | Train Loss 0.0698 | Val Loss 1.046 | Val Acc 75.00 + +Epoch 075 | Time 1418s | Train Loss 0.0457 | Val Loss 1.037 | Val Acc 74.60 + +Epoch 076 | Time 1439s | Train Loss 0.0401 | Val Loss 1.032 | Val Acc 75.10 + +Epoch 077 | Time 1460s | Train Loss 0.0412 | Val Loss 1.024 | Val Acc 74.90 + +Epoch 078 | Time 1482s | Train Loss 0.0349 | Val Loss 1.009 | Val Acc 75.50 + +Epoch 079 | Time 1503s | Train Loss 0.0244 | Val Loss 0.980 | Val Acc 75.10 + +Epoch 080 | Time 1524s | Train Loss 0.0269 | Val Loss 0.995 | Val Acc 75.70 + +Epoch 081 | Time 1546s | Train Loss 0.0164 | Val Loss 0.941 | Val Acc 75.90 + +Epoch 082 | Time 1566s | Train Loss 0.0187 | Val Loss 0.958 | Val Acc 75.10 + +Epoch 083 | Time 1588s | Train Loss 0.0175 | Val Loss 0.940 | Val Acc 76.00 + +Epoch 084 | Time 1610s | Train Loss 0.0156 | Val Loss 0.941 | Val Acc 76.40 + +Epoch 085 | Time 1630s | Train Loss 0.0251 | Val Loss 0.928 | Val Acc 76.20 + +Epoch 086 | Time 1651s | Train Loss 0.0136 | Val Loss 0.926 | Val Acc 76.00 + +Epoch 087 | Time 1672s | Train Loss 0.0178 | Val Loss 0.911 | Val Acc 76.30 + +Epoch 088 | Time 1694s | Train Loss 0.0120 | Val Loss 0.922 | Val Acc 76.40 + +Epoch 089 | Time 1715s | Train Loss 0.0127 | Val Loss 0.923 | Val Acc 76.30 + +Epoch 090 | Time 1736s | Train Loss 0.0137 | Val Loss 0.924 | Val Acc 76.20 + +Epoch 091 | Time 1757s | Train Loss 0.0120 | Val Loss 0.921 | Val Acc 76.00 + +Epoch 092 | Time 1778s | Train Loss 0.0120 | Val Loss 0.920 | Val Acc 76.60 + +Epoch 093 | Time 1800s | Train Loss 0.0110 | Val Loss 0.907 | Val Acc 76.90 + +Epoch 094 | Time 1821s | Train Loss 0.0117 | Val Loss 0.916 | Val Acc 76.30 + +Epoch 095 | Time 1841s | Train Loss 0.0102 | Val Loss 0.913 | Val Acc 76.70 + +Epoch 096 | Time 1863s | Train Loss 0.0102 | Val Loss 0.911 | Val Acc 76.60 + +Epoch 097 | Time 1883s | Train Loss 0.0124 | Val Loss 0.915 | Val Acc 76.80 + +Epoch 098 | Time 1905s | Train Loss 0.0095 | Val Loss 0.912 | Val Acc 76.70 + +Epoch 099 | Time 1926s | Train Loss 0.0110 | Val Loss 0.911 | Val Acc 77.00 + +Epoch 100 | Time 1948s | Train Loss 0.0098 | Val Loss 0.914 | Val Acc 77.10 +Training Completed! Best accuracy: 77.10 at epoch 100 +────────────────────────────────────────────────────────────────────── +Start testing... + +Complete Evaluation, Last accuracy 77.39 +Completed! 
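Note: the config.yml files added in this patch are PyYAML dumps of openood.utils.config.Config objects (tagged !!python/object/new:...), so yaml.safe_load will reject them. Below is a minimal sketch of re-loading one for inspection; it assumes PyYAML >= 5.1, an importable openood package, and a path relative to the repo root, and is not itself part of the patch.

import yaml

# unsafe_load is required because the python/object/new tag instantiates
# arbitrary Python classes; only use it on trusted files such as these dumps.
with open('OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s1/config.yml') as f:
    cfg = yaml.unsafe_load(f)

# Config is built from the dumped dictitems, so fields are reachable by key.
print(cfg['optimizer']['lr'])         # 0.1, matching the sgd block in this config
print(cfg['dataset']['num_classes'])  # 100

Dict-style access is used here because the dumped dictitems are plain mappings; whether Config also exposes attribute access depends on the openood implementation and is not assumed.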
diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/best.ckpt b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/best.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..7840920912416e2f8d179691fd342f061b938c84 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/best.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9285b3226b4d3e358800151a58e9e9fd250c74384d552193b54cb607f01ffc98 +size 44951833 diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/best_epoch90_acc0.7760.ckpt b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/best_epoch90_acc0.7760.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..3ed790e3da9c5d54d80275faf84b48b67e1416b3 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/best_epoch90_acc0.7760.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bde97f1c14c228b82fa86417f855b124b55244a41d1a4cc10ae87582b7c4b9f +size 44960657 diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/config.yml b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..5c4764a4c117508ef3ab28ea07576a7197b4bce6 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/config.yml @@ -0,0 +1,211 @@ +!!python/object/new:openood.utils.config.Config +state: + dataset: + name: cifar100 + num_classes: 100 + image_size: 32 + pre_size: 32 + interpolation: bilinear + normalization_type: cifar100 + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: &id001 + - train + - val + - test + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: true + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: false + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: false + network: + name: resnet18_32x32 + num_classes: 100 + pretrained: false + checkpoint: ./results/cifar10_double_label_resnet18_32x32_mos_e100_lr0.003/best.ckpt + num_gpus: 1 + exp_name: cifar100_resnet18_32x32_base_e100_lr0.1_default/s2 + output_dir: ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2 + save_output: true + merge_option: default + mark: default + seed: 2 + num_gpus: 1 + num_workers: 8 + num_machines: 1 + machine_rank: 0 + preprocessor: + name: base + pipeline: + name: train + trainer: + name: base + evaluator: + name: base + optimizer: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + recorder: + name: base + save_all_models: false +dictitems: + dataset: !!python/object/new:openood.utils.config.Config + state: + name: cifar100 + num_classes: 100 + image_size: 32 + pre_size: 32 + interpolation: bilinear + normalization_type: cifar100 + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: *id001 + train: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: true + val: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + 
imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: false + test: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: false + dictitems: + name: cifar100 + num_classes: 100 + image_size: 32 + pre_size: 32 + interpolation: bilinear + normalization_type: cifar100 + num_workers: 8 + num_gpus: 1 + num_machines: 1 + split_names: *id001 + train: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: true + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt + batch_size: 128 + shuffle: true + val: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: false + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt + batch_size: 200 + shuffle: false + test: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: false + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_classic/ + imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt + batch_size: 200 + shuffle: false + network: !!python/object/new:openood.utils.config.Config + state: + name: resnet18_32x32 + num_classes: 100 + pretrained: false + checkpoint: ./results/cifar10_double_label_resnet18_32x32_mos_e100_lr0.003/best.ckpt + num_gpus: 1 + dictitems: + name: resnet18_32x32 + num_classes: 100 + pretrained: false + checkpoint: ./results/cifar10_double_label_resnet18_32x32_mos_e100_lr0.003/best.ckpt + num_gpus: 1 + exp_name: cifar100_resnet18_32x32_base_e100_lr0.1_default/s2 + output_dir: ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2 + save_output: true + merge_option: default + mark: default + seed: 2 + num_gpus: 1 + num_workers: 8 + num_machines: 1 + machine_rank: 0 + preprocessor: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base + pipeline: !!python/object/new:openood.utils.config.Config + state: + name: train + dictitems: + name: train + trainer: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base + evaluator: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base + optimizer: !!python/object/new:openood.utils.config.Config + state: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + dictitems: + name: sgd + num_epochs: 100 + lr: 0.1 + momentum: 0.9 + weight_decay: 0.0005 + recorder: !!python/object/new:openood.utils.config.Config + state: + name: base + save_all_models: false + dictitems: + name: base + save_all_models: false diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/last_epoch100_acc0.7730.ckpt b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/last_epoch100_acc0.7730.ckpt new file mode 100644 index 
0000000000000000000000000000000000000000..e6c19f4e6476e8cec9afd17f5e0d1207cb61f1f0 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/last_epoch100_acc0.7730.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5d94321d4d422d93dcd9328096660277cf4e64b47305788d71fe61826922fae +size 44960781 diff --git a/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/log.txt b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..dfe702b5ded2f4499adef8dbad1e05dd143f4548 --- /dev/null +++ b/OpenOOD/results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s2/log.txt @@ -0,0 +1,207 @@ +Start training... + +Epoch 001 | Time 20s | Train Loss 3.5462 | Val Loss 3.487 | Val Acc 16.10 + +Epoch 002 | Time 38s | Train Loss 2.9263 | Val Loss 2.917 | Val Acc 29.50 + +Epoch 003 | Time 56s | Train Loss 2.5085 | Val Loss 2.393 | Val Acc 37.00 + +Epoch 004 | Time 74s | Train Loss 2.1973 | Val Loss 2.164 | Val Acc 41.80 + +Epoch 005 | Time 92s | Train Loss 1.9716 | Val Loss 2.123 | Val Acc 45.30 + +Epoch 006 | Time 111s | Train Loss 1.7725 | Val Loss 1.795 | Val Acc 51.10 + +Epoch 007 | Time 129s | Train Loss 1.6572 | Val Loss 1.690 | Val Acc 54.80 + +Epoch 008 | Time 148s | Train Loss 1.5983 | Val Loss 2.006 | Val Acc 46.90 + +Epoch 009 | Time 166s | Train Loss 1.4254 | Val Loss 1.800 | Val Acc 53.70 + +Epoch 010 | Time 184s | Train Loss 1.4317 | Val Loss 1.758 | Val Acc 53.40 + +Epoch 011 | Time 203s | Train Loss 1.4236 | Val Loss 1.539 | Val Acc 58.80 + +Epoch 012 | Time 221s | Train Loss 1.3600 | Val Loss 1.666 | Val Acc 56.90 + +Epoch 013 | Time 239s | Train Loss 1.3411 | Val Loss 1.545 | Val Acc 59.00 + +Epoch 014 | Time 258s | Train Loss 1.3232 | Val Loss 1.477 | Val Acc 59.80 + +Epoch 015 | Time 276s | Train Loss 1.2384 | Val Loss 1.661 | Val Acc 56.10 + +Epoch 016 | Time 295s | Train Loss 1.2892 | Val Loss 1.620 | Val Acc 56.20 + +Epoch 017 | Time 313s | Train Loss 1.1384 | Val Loss 1.563 | Val Acc 58.60 + +Epoch 018 | Time 331s | Train Loss 1.2201 | Val Loss 1.552 | Val Acc 59.40 + +Epoch 019 | Time 350s | Train Loss 1.1826 | Val Loss 1.619 | Val Acc 58.70 + +Epoch 020 | Time 368s | Train Loss 1.1592 | Val Loss 1.699 | Val Acc 56.60 + +Epoch 021 | Time 386s | Train Loss 1.2195 | Val Loss 1.508 | Val Acc 58.60 + +Epoch 022 | Time 405s | Train Loss 1.0760 | Val Loss 1.396 | Val Acc 62.40 + +Epoch 023 | Time 423s | Train Loss 1.0751 | Val Loss 1.521 | Val Acc 60.20 + +Epoch 024 | Time 441s | Train Loss 1.1389 | Val Loss 1.647 | Val Acc 57.50 + +Epoch 025 | Time 460s | Train Loss 0.9897 | Val Loss 1.542 | Val Acc 60.40 + +Epoch 026 | Time 478s | Train Loss 1.0621 | Val Loss 1.473 | Val Acc 61.80 + +Epoch 027 | Time 496s | Train Loss 1.0716 | Val Loss 1.273 | Val Acc 64.70 + +Epoch 028 | Time 515s | Train Loss 1.1194 | Val Loss 1.493 | Val Acc 61.50 + +Epoch 029 | Time 533s | Train Loss 0.9729 | Val Loss 1.441 | Val Acc 60.10 + +Epoch 030 | Time 551s | Train Loss 1.0625 | Val Loss 1.485 | Val Acc 61.20 + +Epoch 031 | Time 569s | Train Loss 1.0097 | Val Loss 1.406 | Val Acc 64.00 + +Epoch 032 | Time 588s | Train Loss 0.9092 | Val Loss 1.523 | Val Acc 61.20 + +Epoch 033 | Time 606s | Train Loss 0.8982 | Val Loss 1.489 | Val Acc 60.10 + +Epoch 034 | Time 624s | Train Loss 0.9027 | Val Loss 1.398 | Val Acc 63.50 + +Epoch 035 | Time 643s | Train Loss 0.9952 | Val Loss 1.465 | Val Acc 61.40 + +Epoch 036 | Time 662s | Train Loss 0.9195 | Val Loss 1.575 | Val Acc 
59.80 + +Epoch 037 | Time 680s | Train Loss 0.8336 | Val Loss 1.309 | Val Acc 65.40 + +Epoch 038 | Time 699s | Train Loss 0.9020 | Val Loss 1.475 | Val Acc 63.50 + +Epoch 039 | Time 718s | Train Loss 0.8512 | Val Loss 1.463 | Val Acc 62.10 + +Epoch 040 | Time 736s | Train Loss 0.8647 | Val Loss 1.420 | Val Acc 62.90 + +Epoch 041 | Time 755s | Train Loss 0.8332 | Val Loss 1.341 | Val Acc 65.60 + +Epoch 042 | Time 774s | Train Loss 0.8191 | Val Loss 1.410 | Val Acc 63.30 + +Epoch 043 | Time 793s | Train Loss 0.7464 | Val Loss 1.306 | Val Acc 65.40 + +Epoch 044 | Time 812s | Train Loss 0.7638 | Val Loss 1.386 | Val Acc 65.30 + +Epoch 045 | Time 831s | Train Loss 0.7033 | Val Loss 1.419 | Val Acc 65.60 + +Epoch 046 | Time 850s | Train Loss 0.7633 | Val Loss 1.272 | Val Acc 66.70 + +Epoch 047 | Time 869s | Train Loss 0.6610 | Val Loss 1.330 | Val Acc 65.40 + +Epoch 048 | Time 888s | Train Loss 0.6741 | Val Loss 1.314 | Val Acc 65.00 + +Epoch 049 | Time 907s | Train Loss 0.6430 | Val Loss 1.342 | Val Acc 66.10 + +Epoch 050 | Time 928s | Train Loss 0.6387 | Val Loss 1.270 | Val Acc 67.20 + +Epoch 051 | Time 950s | Train Loss 0.5501 | Val Loss 1.308 | Val Acc 66.80 + +Epoch 052 | Time 971s | Train Loss 0.5859 | Val Loss 1.508 | Val Acc 65.70 + +Epoch 053 | Time 992s | Train Loss 0.5972 | Val Loss 1.378 | Val Acc 64.70 + +Epoch 054 | Time 1013s | Train Loss 0.5634 | Val Loss 1.356 | Val Acc 66.90 + +Epoch 055 | Time 1035s | Train Loss 0.5724 | Val Loss 1.354 | Val Acc 67.60 + +Epoch 056 | Time 1057s | Train Loss 0.4970 | Val Loss 1.211 | Val Acc 68.10 + +Epoch 057 | Time 1078s | Train Loss 0.4831 | Val Loss 1.334 | Val Acc 67.30 + +Epoch 058 | Time 1100s | Train Loss 0.5272 | Val Loss 1.313 | Val Acc 68.10 + +Epoch 059 | Time 1122s | Train Loss 0.4011 | Val Loss 1.217 | Val Acc 68.90 + +Epoch 060 | Time 1144s | Train Loss 0.4146 | Val Loss 1.313 | Val Acc 67.10 + +Epoch 061 | Time 1166s | Train Loss 0.3966 | Val Loss 1.276 | Val Acc 69.60 + +Epoch 062 | Time 1186s | Train Loss 0.3843 | Val Loss 1.250 | Val Acc 68.40 + +Epoch 063 | Time 1208s | Train Loss 0.3282 | Val Loss 1.295 | Val Acc 70.30 + +Epoch 064 | Time 1229s | Train Loss 0.2982 | Val Loss 1.252 | Val Acc 69.30 + +Epoch 065 | Time 1251s | Train Loss 0.3069 | Val Loss 1.281 | Val Acc 71.10 + +Epoch 066 | Time 1273s | Train Loss 0.2598 | Val Loss 1.312 | Val Acc 69.40 + +Epoch 067 | Time 1295s | Train Loss 0.2248 | Val Loss 1.222 | Val Acc 71.00 + +Epoch 068 | Time 1317s | Train Loss 0.2474 | Val Loss 1.208 | Val Acc 70.60 + +Epoch 069 | Time 1339s | Train Loss 0.1645 | Val Loss 1.150 | Val Acc 70.80 + +Epoch 070 | Time 1360s | Train Loss 0.1758 | Val Loss 1.178 | Val Acc 71.40 + +Epoch 071 | Time 1382s | Train Loss 0.1285 | Val Loss 1.159 | Val Acc 72.50 + +Epoch 072 | Time 1405s | Train Loss 0.0999 | Val Loss 1.108 | Val Acc 73.20 + +Epoch 073 | Time 1426s | Train Loss 0.0895 | Val Loss 1.085 | Val Acc 72.50 + +Epoch 074 | Time 1449s | Train Loss 0.0797 | Val Loss 1.056 | Val Acc 74.50 + +Epoch 075 | Time 1472s | Train Loss 0.0613 | Val Loss 1.029 | Val Acc 75.20 + +Epoch 076 | Time 1494s | Train Loss 0.0476 | Val Loss 1.052 | Val Acc 74.50 + +Epoch 077 | Time 1516s | Train Loss 0.0355 | Val Loss 0.977 | Val Acc 76.60 + +Epoch 078 | Time 1538s | Train Loss 0.0290 | Val Loss 1.007 | Val Acc 75.60 + +Epoch 079 | Time 1560s | Train Loss 0.0259 | Val Loss 0.970 | Val Acc 76.20 + +Epoch 080 | Time 1583s | Train Loss 0.0212 | Val Loss 0.951 | Val Acc 76.50 + +Epoch 081 | Time 1605s | Train Loss 0.0164 | Val Loss 0.952 | Val Acc 76.90 + +Epoch 
082 | Time 1628s | Train Loss 0.0154 | Val Loss 0.935 | Val Acc 76.70 + +Epoch 083 | Time 1649s | Train Loss 0.0139 | Val Loss 0.923 | Val Acc 76.80 + +Epoch 084 | Time 1672s | Train Loss 0.0159 | Val Loss 0.922 | Val Acc 76.90 + +Epoch 085 | Time 1695s | Train Loss 0.0104 | Val Loss 0.912 | Val Acc 77.40 + +Epoch 086 | Time 1717s | Train Loss 0.0124 | Val Loss 0.914 | Val Acc 77.60 + +Epoch 087 | Time 1740s | Train Loss 0.0155 | Val Loss 0.910 | Val Acc 77.10 + +Epoch 088 | Time 1762s | Train Loss 0.0098 | Val Loss 0.899 | Val Acc 77.40 + +Epoch 089 | Time 1785s | Train Loss 0.0095 | Val Loss 0.912 | Val Acc 77.60 + +Epoch 090 | Time 1807s | Train Loss 0.0126 | Val Loss 0.896 | Val Acc 77.60 + +Epoch 091 | Time 1830s | Train Loss 0.0115 | Val Loss 0.900 | Val Acc 77.40 + +Epoch 092 | Time 1851s | Train Loss 0.0107 | Val Loss 0.903 | Val Acc 77.40 + +Epoch 093 | Time 1873s | Train Loss 0.0095 | Val Loss 0.894 | Val Acc 77.40 + +Epoch 094 | Time 1895s | Train Loss 0.0094 | Val Loss 0.899 | Val Acc 77.30 + +Epoch 095 | Time 1918s | Train Loss 0.0097 | Val Loss 0.899 | Val Acc 77.40 + +Epoch 096 | Time 1940s | Train Loss 0.0108 | Val Loss 0.901 | Val Acc 77.50 + +Epoch 097 | Time 1962s | Train Loss 0.0089 | Val Loss 0.900 | Val Acc 77.30 + +Epoch 098 | Time 1984s | Train Loss 0.0105 | Val Loss 0.895 | Val Acc 77.30 + +Epoch 099 | Time 2007s | Train Loss 0.0108 | Val Loss 0.894 | Val Acc 77.30 + +Epoch 100 | Time 2029s | Train Loss 0.0104 | Val Loss 0.898 | Val Acc 77.30 +Training Completed! Best accuracy: 77.60 at epoch 90 +────────────────────────────────────────────────────────────────────── +Start testing... + +Complete Evaluation, Last accuracy 77.70 +Completed! diff --git a/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/best_NetF.ckpt b/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/best_NetF.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..d2371d19c39683e75f730a7c3147d946d1f66499 --- /dev/null +++ b/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/best_NetF.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d476a8707611187fa44c66cc8d88cf5a67d01f09624a54e670eb1f66b73461f1 +size 94349317 diff --git a/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/best_criterion.ckpt b/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/best_criterion.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..8756738633ee34ed59e4dc304026bcc190c58651 --- /dev/null +++ b/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/best_criterion.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38506a27f3a807b123574f131ff04a95e42ef8d8cb4f89bb55383d97c639c2f3 +size 8193219 diff --git a/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/config.yml b/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..6212293056eaf48c4aa673a08e86559deb30c519 --- /dev/null +++ b/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/config.yml @@ -0,0 +1,243 @@ +!!python/object/new:openood.utils.config.Config +state: + dataset: + name: imagenet + num_classes: 1000 + pre_size: 256 + image_size: 224 + interpolation: bilinear + normalization_type: imagenet + num_workers: 16 + num_gpus: 2 + num_machines: 1 + split_names: &id001 + - train + - val + - test + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: true + val: 
+ dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: false + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: false + network: + name: arpl_net + num_classes: 1000 + image_size: 224 + pretrained: false + checkpoint: none + num_gpus: 2 + weight_pl: 0.1 + temp: 1.0 + feat_extract_network: + name: resnet50 + num_classes: 1000 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 2 + exp_name: imagenet_arpl_net_arpl_e30_lr0.001/s0 + output_dir: ./results/imagenet_arpl_net_arpl_e30_lr0.001/s0 + save_output: true + merge_option: merge + seed: 0 + num_gpus: 2 + num_workers: 16 + num_machines: 1 + machine_rank: 0 + pipeline: + name: train + trainer: + name: arpl + evaluator: + name: arpl + optimizer: + name: sgd + num_epochs: 30 + lr: 0.001 + momentum: 0.9 + weight_decay: 0.0001 + recorder: + name: arpl + save_all_models: false + preprocessor: + name: base +dictitems: + dataset: !!python/object/new:openood.utils.config.Config + state: + name: imagenet + num_classes: 1000 + pre_size: 256 + image_size: 224 + interpolation: bilinear + normalization_type: imagenet + num_workers: 16 + num_gpus: 2 + num_machines: 1 + split_names: *id001 + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: true + val: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: false + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: false + dictitems: + name: imagenet + num_classes: 1000 + pre_size: 256 + image_size: 224 + interpolation: bilinear + normalization_type: imagenet + num_workers: 16 + num_gpus: 2 + num_machines: 1 + split_names: *id001 + train: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: true + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: true + val: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: false + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: false + test: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: false + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: false + network: !!python/object/new:openood.utils.config.Config + state: + name: arpl_net + num_classes: 1000 + 
image_size: 224 + pretrained: false + checkpoint: none + num_gpus: 2 + weight_pl: 0.1 + temp: 1.0 + feat_extract_network: + name: resnet50 + num_classes: 1000 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 2 + dictitems: + name: arpl_net + num_classes: 1000 + image_size: 224 + pretrained: false + checkpoint: none + num_gpus: 2 + weight_pl: 0.1 + temp: 1.0 + feat_extract_network: !!python/object/new:openood.utils.config.Config + state: + name: resnet50 + num_classes: 1000 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 2 + dictitems: + name: resnet50 + num_classes: 1000 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 2 + exp_name: imagenet_arpl_net_arpl_e30_lr0.001/s0 + output_dir: ./results/imagenet_arpl_net_arpl_e30_lr0.001/s0 + save_output: true + merge_option: merge + seed: 0 + num_gpus: 2 + num_workers: 16 + num_machines: 1 + machine_rank: 0 + pipeline: !!python/object/new:openood.utils.config.Config + state: + name: train + dictitems: + name: train + trainer: !!python/object/new:openood.utils.config.Config + state: + name: arpl + dictitems: + name: arpl + evaluator: !!python/object/new:openood.utils.config.Config + state: + name: arpl + dictitems: + name: arpl + optimizer: !!python/object/new:openood.utils.config.Config + state: + name: sgd + num_epochs: 30 + lr: 0.001 + momentum: 0.9 + weight_decay: 0.0001 + dictitems: + name: sgd + num_epochs: 30 + lr: 0.001 + momentum: 0.9 + weight_decay: 0.0001 + recorder: !!python/object/new:openood.utils.config.Config + state: + name: arpl + save_all_models: false + dictitems: + name: arpl + save_all_models: false + preprocessor: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base diff --git a/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/log.txt b/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d21102d2a206c3b5898f4b941856a4a8a36520b --- /dev/null +++ b/OpenOOD/results/imagenet_arpl_net_arpl_e30_lr0.001/s0/log.txt @@ -0,0 +1,68 @@ +Model Loading resnet50 Completed! +Start training... 
+ +Epoch 001 | Time 7832s | Train Loss 1.2749 | Val Loss 1.189 | Val Acc 35.78 + +Epoch 002 | Time 14421s | Train Loss 1.2995 | Val Loss 1.115 | Val Acc 36.86 + +Epoch 003 | Time 21974s | Train Loss 1.1358 | Val Loss 1.092 | Val Acc 36.76 + +Epoch 004 | Time 30754s | Train Loss 1.0682 | Val Loss 1.086 | Val Acc 36.98 + +Epoch 005 | Time 36944s | Train Loss 1.0781 | Val Loss 1.080 | Val Acc 37.02 + +Epoch 006 | Time 41332s | Train Loss 1.0132 | Val Loss 1.090 | Val Acc 36.68 + +Epoch 007 | Time 46446s | Train Loss 0.9356 | Val Loss 1.079 | Val Acc 37.16 + +Epoch 008 | Time 55654s | Train Loss 0.9617 | Val Loss 1.080 | Val Acc 37.18 + +Epoch 009 | Time 64517s | Train Loss 0.9541 | Val Loss 1.079 | Val Acc 37.28 + +Epoch 010 | Time 70918s | Train Loss 0.9436 | Val Loss 1.076 | Val Acc 37.18 + +Epoch 011 | Time 80417s | Train Loss 0.9338 | Val Loss 1.046 | Val Acc 37.26 + +Epoch 012 | Time 89985s | Train Loss 0.8738 | Val Loss 1.056 | Val Acc 37.30 + +Epoch 013 | Time 102448s | Train Loss 0.9005 | Val Loss 1.071 | Val Acc 37.38 + +Epoch 014 | Time 112222s | Train Loss 0.8536 | Val Loss 1.069 | Val Acc 37.28 + +Epoch 015 | Time 135544s | Train Loss 0.7998 | Val Loss 1.063 | Val Acc 37.42 + +Epoch 016 | Time 152342s | Train Loss 0.8609 | Val Loss 1.053 | Val Acc 37.30 + +Epoch 017 | Time 162197s | Train Loss 0.8253 | Val Loss 1.066 | Val Acc 37.42 + +Epoch 018 | Time 169640s | Train Loss 0.8291 | Val Loss 1.049 | Val Acc 37.62 + +Epoch 019 | Time 174525s | Train Loss 0.7483 | Val Loss 1.046 | Val Acc 37.48 + +Epoch 020 | Time 182772s | Train Loss 0.8413 | Val Loss 1.044 | Val Acc 37.90 + +Epoch 021 | Time 185322s | Train Loss 0.7295 | Val Loss 1.051 | Val Acc 37.78 + +Epoch 022 | Time 191523s | Train Loss 0.8545 | Val Loss 1.053 | Val Acc 37.82 + +Epoch 023 | Time 194687s | Train Loss 0.7405 | Val Loss 1.045 | Val Acc 37.80 + +Epoch 024 | Time 198715s | Train Loss 0.7828 | Val Loss 1.040 | Val Acc 37.90 + +Epoch 025 | Time 201621s | Train Loss 0.7612 | Val Loss 1.044 | Val Acc 37.74 + +Epoch 026 | Time 204329s | Train Loss 0.8379 | Val Loss 1.042 | Val Acc 37.86 + +Epoch 027 | Time 207174s | Train Loss 0.7498 | Val Loss 1.043 | Val Acc 37.98 + +Epoch 028 | Time 209811s | Train Loss 0.7509 | Val Loss 1.039 | Val Acc 37.92 + +Epoch 029 | Time 212976s | Train Loss 0.8492 | Val Loss 1.041 | Val Acc 38.00 + +Epoch 030 | Time 215773s | Train Loss 0.8239 | Val Loss 1.041 | Val Acc 38.00 +Training Completed! Best accuracy: 38.00 at epoch 30 +────────────────────────────────────────────────────────────────────── +Start testing... + +Complete Evaluation, Last accuracy 38.00 +Completed! 
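The config.yml files recorded above are plain PyYAML dumps of OpenOOD's dict-backed Config object: the !!python/object/new:openood.utils.config.Config tag together with the parallel state/dictitems blocks is simply how PyYAML serializes a dict subclass, which is why every field appears twice. A minimal sketch of loading one back for inspection, assuming PyYAML >= 5.1 and that the openood package is importable from the working directory; the python/object/new tag is only resolved by the unsafe loader, so point this only at files you generated yourself:

import yaml

# Any config.yml written under ./results/<exp_name>/ works here; this path
# (relative to the repo root) is one of the files added in this diff.
CFG_PATH = 'results/imagenet_arpl_net_arpl_e30_lr0.001/s0/config.yml'

with open(CFG_PATH) as f:
    # UnsafeLoader is required to construct the
    # `!!python/object/new:openood.utils.config.Config` object; SafeLoader
    # and FullLoader reject arbitrary Python-object tags.
    cfg = yaml.load(f, Loader=yaml.UnsafeLoader)

# The `dictitems` block shows that Config subclasses dict, so plain key
# access works (this prints `sgd 0.001` for the ARPL run logged above).
print(cfg['optimizer']['name'], cfg['optimizer']['lr'])
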
diff --git a/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best.ckpt b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..1677e3ab545e76783a43598193e5f77170ae86d5 --- /dev/null +++ b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:727dba5746007a2b31acf59e6486eee9d61d8c9a2bb8cbb4472ec1521c8d8f3f +size 102527227 diff --git a/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best_epoch30_acc0.7608.ckpt b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best_epoch30_acc0.7608.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..a3b485c47379d9cd656a44aa9823692cd974c3f7 --- /dev/null +++ b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best_epoch30_acc0.7608.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acd8d9b1b259e3c91daa434117b1df8290e6f2b615b581014be9946d7b6ba1dc +size 102550175 diff --git a/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/config.yml b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..3dabf109dd4415530a651b9d34666c6a6b4576ec --- /dev/null +++ b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/config.yml @@ -0,0 +1,223 @@ +!!python/object/new:openood.utils.config.Config +state: + dataset: + name: imagenet + num_classes: 1000 + pre_size: 256 + image_size: 224 + interpolation: bilinear + normalization_type: imagenet + num_workers: 16 + num_gpus: 2 + num_machines: 1 + split_names: &id001 + - train + - val + - test + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: true + val: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: false + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: false + network: + name: resnet50 + num_classes: 1000 + image_size: 224 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 2 + exp_name: imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0 + output_dir: ./results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0 + save_output: true + merge_option: merge + mark: default + seed: 0 + num_gpus: 2 + num_workers: 16 + num_machines: 1 + machine_rank: 0 + pipeline: + name: train + trainer: + name: regmixup + trainer_args: + alpha: 10 + evaluator: + name: base + optimizer: + name: sgd + num_epochs: 30 + lr: 0.001 + momentum: 0.9 + weight_decay: 0.0005 + recorder: + name: base + save_all_models: false + preprocessor: + name: base +dictitems: + dataset: !!python/object/new:openood.utils.config.Config + state: + name: imagenet + num_classes: 1000 + pre_size: 256 + image_size: 224 + interpolation: bilinear + normalization_type: imagenet + num_workers: 16 + num_gpus: 2 + num_machines: 1 + split_names: *id001 + train: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: 
./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: true + val: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: false + test: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: false + dictitems: + name: imagenet + num_classes: 1000 + pre_size: 256 + image_size: 224 + interpolation: bilinear + normalization_type: imagenet + num_workers: 16 + num_gpus: 2 + num_machines: 1 + split_names: *id001 + train: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: true + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt + batch_size: 128 + shuffle: true + val: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: false + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt + batch_size: 128 + shuffle: false + test: !!python/object/new:openood.utils.config.Config + state: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: false + dictitems: + dataset_class: ImglistDataset + data_dir: ./data/images_largescale/ + imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt + batch_size: 128 + shuffle: false + network: !!python/object/new:openood.utils.config.Config + state: + name: resnet50 + num_classes: 1000 + image_size: 224 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 2 + dictitems: + name: resnet50 + num_classes: 1000 + image_size: 224 + pretrained: true + checkpoint: ./results/pretrained_weights/resnet50_imagenet1k_v1.pth + num_gpus: 2 + exp_name: imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0 + output_dir: ./results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0 + save_output: true + merge_option: merge + mark: default + seed: 0 + num_gpus: 2 + num_workers: 16 + num_machines: 1 + machine_rank: 0 + pipeline: !!python/object/new:openood.utils.config.Config + state: + name: train + dictitems: + name: train + trainer: !!python/object/new:openood.utils.config.Config + state: + name: regmixup + trainer_args: + alpha: 10 + dictitems: + name: regmixup + trainer_args: !!python/object/new:openood.utils.config.Config + state: + alpha: 10 + dictitems: + alpha: 10 + evaluator: !!python/object/new:openood.utils.config.Config + state: + name: base + dictitems: + name: base + optimizer: !!python/object/new:openood.utils.config.Config + state: + name: sgd + num_epochs: 30 + lr: 0.001 + momentum: 0.9 + weight_decay: 0.0005 + dictitems: + name: sgd + num_epochs: 30 + lr: 0.001 + momentum: 0.9 + weight_decay: 0.0005 + recorder: !!python/object/new:openood.utils.config.Config + state: + name: base + save_all_models: false + dictitems: + name: base + save_all_models: false + preprocessor: !!python/object/new:openood.utils.config.Config + state: + 
name: base + dictitems: + name: base diff --git a/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/last_epoch30_acc0.7608.ckpt b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/last_epoch30_acc0.7608.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..d92a23a45b61ac5529f0f6a471de5e22a666a819 --- /dev/null +++ b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/last_epoch30_acc0.7608.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1f35dad624fd6c18fe12360028718ce5f9cf01cfec19fec2ef75fa7a0a955dc +size 102550175 diff --git a/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/log.txt b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c8b4dccd19a0a64986fd7263667ee63baff9944 --- /dev/null +++ b/OpenOOD/results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/log.txt @@ -0,0 +1,68 @@ +Model Loading resnet50 Completed! +Start training... + +Epoch 001 | Time 30162s | Train Loss 2.5926 | Val Loss 2.016 | Val Acc 75.06 + +Epoch 002 | Time 42529s | Train Loss 2.5329 | Val Loss 2.003 | Val Acc 75.10 + +Epoch 003 | Time 49813s | Train Loss 2.5391 | Val Loss 1.986 | Val Acc 75.14 + +Epoch 004 | Time 56718s | Train Loss 2.4981 | Val Loss 1.971 | Val Acc 75.38 + +Epoch 005 | Time 60548s | Train Loss 2.5152 | Val Loss 1.972 | Val Acc 75.38 + +Epoch 006 | Time 67769s | Train Loss 2.4826 | Val Loss 1.968 | Val Acc 75.42 + +Epoch 007 | Time 71667s | Train Loss 2.5483 | Val Loss 1.965 | Val Acc 75.34 + +Epoch 008 | Time 78963s | Train Loss 2.4000 | Val Loss 1.970 | Val Acc 75.52 + +Epoch 009 | Time 82537s | Train Loss 2.5211 | Val Loss 1.949 | Val Acc 75.74 + +Epoch 010 | Time 87671s | Train Loss 2.3902 | Val Loss 1.955 | Val Acc 75.46 + +Epoch 011 | Time 91081s | Train Loss 2.4596 | Val Loss 1.951 | Val Acc 75.42 + +Epoch 012 | Time 94874s | Train Loss 2.3736 | Val Loss 1.962 | Val Acc 75.82 + +Epoch 013 | Time 99550s | Train Loss 2.4321 | Val Loss 1.951 | Val Acc 75.60 + +Epoch 014 | Time 103576s | Train Loss 2.4984 | Val Loss 1.950 | Val Acc 75.48 + +Epoch 015 | Time 106999s | Train Loss 2.3610 | Val Loss 1.947 | Val Acc 75.34 + +Epoch 016 | Time 110802s | Train Loss 2.4626 | Val Loss 1.942 | Val Acc 75.68 + +Epoch 017 | Time 114826s | Train Loss 2.3728 | Val Loss 1.951 | Val Acc 75.72 + +Epoch 018 | Time 118378s | Train Loss 2.3965 | Val Loss 1.933 | Val Acc 75.86 + +Epoch 019 | Time 121880s | Train Loss 2.4190 | Val Loss 1.937 | Val Acc 75.62 + +Epoch 020 | Time 125592s | Train Loss 2.3381 | Val Loss 1.944 | Val Acc 75.66 + +Epoch 021 | Time 129035s | Train Loss 2.2956 | Val Loss 1.935 | Val Acc 75.68 + +Epoch 022 | Time 132536s | Train Loss 2.3862 | Val Loss 1.927 | Val Acc 75.68 + +Epoch 023 | Time 136158s | Train Loss 2.3872 | Val Loss 1.924 | Val Acc 75.82 + +Epoch 024 | Time 140771s | Train Loss 2.3394 | Val Loss 1.922 | Val Acc 75.84 + +Epoch 025 | Time 144639s | Train Loss 2.4248 | Val Loss 1.924 | Val Acc 76.04 + +Epoch 026 | Time 148593s | Train Loss 2.4147 | Val Loss 1.921 | Val Acc 76.02 + +Epoch 027 | Time 152440s | Train Loss 2.3763 | Val Loss 1.919 | Val Acc 76.06 + +Epoch 028 | Time 156549s | Train Loss 2.3928 | Val Loss 1.916 | Val Acc 76.04 + +Epoch 029 | Time 160150s | Train Loss 2.3886 | Val Loss 1.915 | Val Acc 75.96 + +Epoch 030 | Time 163738s | Train Loss 2.3009 | Val Loss 1.919 | Val Acc 76.08 +Training Completed! 
Best accuracy: 76.08 at epoch 30 +────────────────────────────────────────────────────────────────────── +Start testing... + +Complete Evaluation, Last accuracy 76.69 +Completed! diff --git a/OpenOOD/results/pretrained_weights/resnet50_imagenet1k_v1.pth b/OpenOOD/results/pretrained_weights/resnet50_imagenet1k_v1.pth new file mode 100644 index 0000000000000000000000000000000000000000..357fcb97e7c32933d9bd1264adf1675264c7608c --- /dev/null +++ b/OpenOOD/results/pretrained_weights/resnet50_imagenet1k_v1.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64072a68d55430dc4b1dffe641925daf5e232dd4864fab0b24161712249cf484 +size 102545695 diff --git a/OpenOOD/scripts/ad/cutpaste/bottle_test_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/bottle_test_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..78544b01796fb549df2b604dc5cec9fcec48d56a --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/bottle_test_cutpaste.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/a_anomaly/2_cutpaste_test.sh + +GPU=1 +CPU=1 +node=68 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/draem/bottle.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/test/test_cutpaste.yml \ +configs/postprocessors/cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +--network.checkpoint "results/bottle_projectionNet_cutpaste_e100_lr0.03/best_epoch35_auroc93.08219178082192.ckpt" \ +--evaluator.name ad diff --git a/OpenOOD/scripts/ad/cutpaste/cifar100_test_ood_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/cifar100_test_ood_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..50bdf8dfad68cccda64c980f1ae804dbc5e3d6c8 --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/cifar100_test_ood_cutpaste.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/cifar100_test_ood_cutpaste.sh + +GPU=1 +CPU=1 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/test/test_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.backbone.name resnet18_32x32 \ +--num_workers 8 \ +--network.pretrained True \ +--network.checkpoint "results/cifar100_projectionNet_cutpaste_e100_lr0.03/best.ckpt" diff --git a/OpenOOD/scripts/ad/cutpaste/cifar100_train_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/cifar100_train_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..b68e4ae8face9bcbff8d4701e5cd7589985c585e --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/cifar100_train_cutpaste.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/cifar100_train_cutpaste.sh + +GPU=1 +CPU=1 +node=68 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/train/train_cutpaste.yml \ 
+configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.pretrained False \ +--network.backbone.name resnet18_32x32 \ +--network.backbone.pretrained True \ +--network.backbone.checkpoint 'results/checkpoints/cifar100_res18_acc78.20.ckpt' \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--merge_option merge diff --git a/OpenOOD/scripts/ad/cutpaste/cifar10_test_ood_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/cifar10_test_ood_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..d71d3f7ccd5cadcca9c2d3e472578d69f5e8f396 --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/cifar10_test_ood_cutpaste.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/cifar10_test_ood_cutpaste.sh + +# GPU=1 +# CPU=1 +# node=68 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/test/test_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.backbone.name resnet18_32x32 \ +--num_workers 8 \ +--network.checkpoint "results/cifar10_projectionNet_cutpaste_e1_lr0.03/best_epoch1_auroc0.5146.ckpt" diff --git a/OpenOOD/scripts/ad/cutpaste/cifar10_train_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/cifar10_train_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..e4df351a5a140cebaab452db3086341cacceaef0 --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/cifar10_train_cutpaste.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/cifar10_train_cutpaste.sh + +GPU=1 +CPU=1 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/train/train_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.pretrained False \ +--network.backbone.name resnet18_32x32 \ +--network.backbone.pretrained True \ +--network.backbone.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt' \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--merge_option merge diff --git a/OpenOOD/scripts/ad/cutpaste/mnist_test_osr_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/mnist_test_osr_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..968b83382e50d951f44f96c794ef31e11512b6cd --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/mnist_test_osr_cutpaste.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/mnist_test_osr_cutpaste.sh + +# GPU=1 +# CPU=1 +# node=68 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/test/test_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ 
+--network.backbone.name lenet \ +--num_workers 8 \ +--network.checkpoint 'results/osr_mnist6_seed1_projectionNet_cutpaste_e100_lr0.03/best.ckpt' \ +--network.backbone.pretrained False \ +--evaluator.name osr diff --git a/OpenOOD/scripts/ad/cutpaste/mnist_train_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/mnist_train_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..7dc9e50717c596a08d14e36655a84e054a2cfe53 --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/mnist_train_cutpaste.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/mnist_train_cutpaste.sh + +GPU=1 +CPU=1 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/train/train_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.pretrained False \ +--network.backbone.name lenet \ +--network.backbone.pretrained True \ +--network.backbone.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--merge_option merge diff --git a/OpenOOD/scripts/ad/cutpaste/mnist_train_osr_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/mnist_train_osr_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..714f9f958148111226abf626aaeb8d7a099bbbd2 --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/mnist_train_osr_cutpaste.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/mnist_train_osr_cutpaste.sh + +GPU=1 +CPU=1 +node=68 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/train/train_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.pretrained False \ +--network.backbone.name lenet \ +--network.backbone.pretrained True \ +--network.backbone.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--num_workers 8 \ +--optimizer.num_epochs 100 diff --git a/OpenOOD/scripts/ad/cutpaste/osr_cifar50_train_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/osr_cifar50_train_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..40c19f5c96be1731eeecb3cc2ce0701f85631e4d --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/osr_cifar50_train_cutpaste.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/cifar50_train_cutpaste.sh + +GPU=1 +CPU=1 +node=68 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar50/cifar50_seed1.yml \ +configs/datasets/osr_cifar50/cifar50_seed1_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/train/train_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.pretrained False \ +--network.backbone.name resnet18_32x32 \ +--network.backbone.pretrained True \ 
+--network.backbone.checkpoint 'results/checkpoints/osr/cifar50_seed1_acc80.24.ckpt' \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--merge_option merge diff --git a/OpenOOD/scripts/ad/cutpaste/osr_cifar6_train_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/osr_cifar6_train_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..e566015963127f1e80811e5739ca14e395015427 --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/osr_cifar6_train_cutpaste.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/cifar6_train_cutpaste.sh + +GPU=1 +CPU=1 +node=68 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar6/cifar6_seed1.yml \ +configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/train/train_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.pretrained False \ +--network.backbone.name resnet18_32x32 \ +--network.backbone.pretrained True \ +--network.backbone.checkpoint 'results/checkpoints/osr/cifar6_seed1_acc97.57.ckpt' \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--merge_option merge diff --git a/OpenOOD/scripts/ad/cutpaste/osr_test_ood_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/osr_test_ood_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..1ff353aaa89dc35c07d8b1b3784a30a7217be271 --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/osr_test_ood_cutpaste.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/osr_test_ood_cutpaste.sh + +GPU=1 +CPU=1 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/test/test_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.backbone.name lenet \ +--num_workers 8 \ +--network.pretrained True \ +--network.checkpoint "results/mnist_projectionNet_cutpaste_e100_lr0.03/best.ckpt" diff --git a/OpenOOD/scripts/ad/cutpaste/osr_tin20_train_cutpaste.sh b/OpenOOD/scripts/ad/cutpaste/osr_tin20_train_cutpaste.sh new file mode 100644 index 0000000000000000000000000000000000000000..3e656443fdda5cd835dd980c95fe51edf433be53 --- /dev/null +++ b/OpenOOD/scripts/ad/cutpaste/osr_tin20_train_cutpaste.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/ad/cutpaste/tin20_train_cutpaste.sh + +GPU=1 +CPU=1 +node=68 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_tin20/tin20_seed1.yml \ +configs/datasets/osr_tin20/tin20_seed1_ood.yml \ +configs/networks/cutpaste.yml \ +configs/pipelines/train/train_cutpaste.yml \ +configs/preprocessors/cutpaste_preprocessor.yml \ +configs/postprocessors/cutpaste.yml \ +--network.pretrained False \ +--network.backbone.name resnet18_64x64 \ +--network.backbone.pretrained True \ +--network.backbone.checkpoint 'results/checkpoints/osr/tin20_seed1_acc77.23.ckpt' \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--merge_option merge diff --git 
a/OpenOOD/scripts/ad/draem/bottle_test_draem.sh b/OpenOOD/scripts/ad/draem/bottle_test_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..4e87d3a0ac8433ecc7e08b0794ada854e311c02d --- /dev/null +++ b/OpenOOD/scripts/ad/draem/bottle_test_draem.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ad/draem/bottle_test_draem.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/draem/bottle.yml \ +configs/networks/draem.yml \ +configs/pipelines/test/test_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--evaluator.name ood diff --git a/OpenOOD/scripts/ad/draem/bottle_train_draem.sh b/OpenOOD/scripts/ad/draem/bottle_train_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..e66dcd96878d909d169678c46d14d32d374a79c0 --- /dev/null +++ b/OpenOOD/scripts/ad/draem/bottle_train_draem.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ad/draem/bottle_train_draem.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/mvtec/bottle.yml \ +configs/networks/draem.yml \ +configs/pipelines/train/train_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--evaluator.name ad \ +--optimizer.num_epochs 2 diff --git a/OpenOOD/scripts/ad/draem/cifar100_test_ood_draem.sh b/OpenOOD/scripts/ad/draem/cifar100_test_ood_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..973c8fccd7d4ec8021cccd515cef76ab1d598097 --- /dev/null +++ b/OpenOOD/scripts/ad/draem/cifar100_test_ood_draem.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/ad/draem/cifar100_test_ood_draem.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/test/test_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--network.pretrained True \ + diff --git a/OpenOOD/scripts/ad/draem/cifar100_train_draem.sh b/OpenOOD/scripts/ad/draem/cifar100_train_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..29845c6aa0ab560301545ce711b2e624bcc70f47 --- /dev/null +++ b/OpenOOD/scripts/ad/draem/cifar100_train_draem.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/ad/draem/cifar100_train_draem.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/train/train_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ 
+configs/postprocessors/draem.yml \ +--evaluator.name ad \ +--dataset.train.batch_size 64 \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--recorder.name draem diff --git a/OpenOOD/scripts/ad/draem/cifar10_test_ood_draem.sh b/OpenOOD/scripts/ad/draem/cifar10_test_ood_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..47e5937cbf76c93f8b1d32bbcfe59c65fefa69ed --- /dev/null +++ b/OpenOOD/scripts/ad/draem/cifar10_test_ood_draem.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ad/draem/cifar10_test_ood_draem.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/test/test_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--network.pretrained True diff --git a/OpenOOD/scripts/ad/draem/cifar10_train_draem.sh b/OpenOOD/scripts/ad/draem/cifar10_train_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..2ae59b93e160fd807860f0f5ed1443ecda02dd00 --- /dev/null +++ b/OpenOOD/scripts/ad/draem/cifar10_train_draem.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/draem/cifar10_train_draem.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/train/train_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--evaluator.name ad \ +--dataset.train.batch_size 64 \ +--num_workers 8 \ +--optimizer.num_epochs 2 diff --git a/OpenOOD/scripts/ad/draem/mnist_test_osr_draem.sh b/OpenOOD/scripts/ad/draem/mnist_test_osr_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..28ba1397122387cacda5f6c40151406b8ee316d1 --- /dev/null +++ b/OpenOOD/scripts/ad/draem/mnist_test_osr_draem.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/ad/draem/mnist_test_osr_draem.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/test/test_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--network.pretrained True \ +--evaluator.name osr diff --git a/OpenOOD/scripts/ad/draem/mnist_train_draem.sh b/OpenOOD/scripts/ad/draem/mnist_train_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..7b4a5c0526d31feedc8fc7fa22962023e19e2f2d --- /dev/null +++ b/OpenOOD/scripts/ad/draem/mnist_train_draem.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/draem/mnist_train_draem.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} 
--ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/train/train_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--evaluator.name ad \ +--dataset.train.batch_size 64 \ +--num_workers 8 \ +--optimizer.num_epochs 100 diff --git a/OpenOOD/scripts/ad/draem/mnist_train_osr_draem.sh b/OpenOOD/scripts/ad/draem/mnist_train_osr_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..b736d3e3f97e60a4ded141cde513e0ab13bb300f --- /dev/null +++ b/OpenOOD/scripts/ad/draem/mnist_train_osr_draem.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ad/draem/mnist_train_osr_draem.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/train/train_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--evaluator.name ad \ +--dataset.train.batch_size 32 \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--recorder.name draem diff --git a/OpenOOD/scripts/ad/draem/osr_cifar50_train_draem.sh b/OpenOOD/scripts/ad/draem/osr_cifar50_train_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..1dfd3d10e63bb052cfa271aca925f96de653ab2f --- /dev/null +++ b/OpenOOD/scripts/ad/draem/osr_cifar50_train_draem.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/draem/osr_cifar50_train_draem.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar50/cifar50_seed1.yml \ +configs/datasets/osr_cifar50/cifar50_seed1_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/train/train_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--evaluator.name ad \ +--dataset.train.batch_size 64 \ +--num_workers 8 \ +--optimizer.num_epochs 2 & diff --git a/OpenOOD/scripts/ad/draem/osr_cifar6_train_draem.sh b/OpenOOD/scripts/ad/draem/osr_cifar6_train_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..6eff9d31daf78c95884f24cd8df6f980bfcd153f --- /dev/null +++ b/OpenOOD/scripts/ad/draem/osr_cifar6_train_draem.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/draem/osr_cifar6_train_draem.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar6/cifar6_seed1.yml \ +configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/train/train_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--evaluator.name ad \ +--dataset.train.batch_size 64 \ +--num_workers 8 \ +--optimizer.num_epochs 2 & diff --git a/OpenOOD/scripts/ad/draem/osr_tin20_train_draem.sh 
diff --git a/OpenOOD/scripts/ad/draem/osr_tin20_train_draem.sh b/OpenOOD/scripts/ad/draem/osr_tin20_train_draem.sh new file mode 100644 index 0000000000000000000000000000000000000000..cd4e5f08f3a67da1df33510998d95d3e49f01a3b --- /dev/null +++ b/OpenOOD/scripts/ad/draem/osr_tin20_train_draem.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/draem/osr_tin20_train_draem.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_tin20/tin20_seed1.yml \ +configs/datasets/osr_tin20/tin20_seed1_ood.yml \ +configs/networks/draem.yml \ +configs/pipelines/train/train_draem.yml \ +configs/preprocessors/draem_preprocessor.yml \ +configs/postprocessors/draem.yml \ +--evaluator.name ad \ +--dataset.train.batch_size 64 \ +--num_workers 8 \ +--optimizer.num_epochs 2 & diff --git a/OpenOOD/scripts/ad/dsvdd/0_dcae_pretrain.sh b/OpenOOD/scripts/ad/dsvdd/0_dcae_pretrain.sh new file mode 100644 index 0000000000000000000000000000000000000000..772e956c5c02e7dd565f481731f746c060725fa4 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/0_dcae_pretrain.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/0_dcae_pretrain.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + + +python main.py \ +--config configs/datasets/objects/cifar10.yml \ +configs/datasets/objects/cifar10_ood.yml \ +configs/pipelines/train/train_dcae.yml \ +configs/networks/dcae.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--pipeline.name train_ad \ +--postprocessor.name dsvdd \ +--evaluator.name ad \ +--recorder.name ad \ +--optimizer.num_epochs 2 \ +--trainer.name dsvdd diff --git a/OpenOOD/scripts/ad/dsvdd/0_dsvdd_test.sh b/OpenOOD/scripts/ad/dsvdd/0_dsvdd_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..65d7f0a678dedaa810a90514d066e2d84e1221e8 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/0_dsvdd_test.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/0_dsvdd_test.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/objects/cifar10.yml \ +configs/datasets/objects/cifar10_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_dsvdd.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--pipeline.name test_ad \ +--postprocessor.name dsvdd \ +--evaluator.name ood \ +--network.pretrained True \ +--network.checkpoint 'results/cifar10_resnet18_32x32_dsvdd_e3/DSVDD_best_epoch2_roc_auc0.719908611111111.pth' diff --git a/OpenOOD/scripts/ad/dsvdd/0_dsvdd_train_dcae.sh b/OpenOOD/scripts/ad/dsvdd/0_dsvdd_train_dcae.sh new file mode 100644 index 0000000000000000000000000000000000000000..60d0096083f1d165e9df93c143f00adf987a4943 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/0_dsvdd_train_dcae.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/0_dsvdd_train_dcae.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config
configs/datasets/objects/cifar10.yml \ +configs/datasets/objects/cifar10_ood.yml \ +configs/pipelines/train/train_dsvdd.yml \ +configs/networks/dsvdd.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--pipeline.name train_ad \ +--postprocessor.name dsvdd \ +--evaluator.name ad \ +--recorder.name ad \ +--optimizer.num_epochs 2 \ +--network.pretrained True \ +--network.checkpoint 'results/cifar10_dcae_dsvdd_e3/best.ckpt' diff --git a/OpenOOD/scripts/ad/dsvdd/cifar100_test_ood_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/cifar100_test_ood_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..17fe9f0972b4f46b72edaa519ab85385a82f9f2d --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/cifar100_test_ood_dsvdd.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/cifar100_test_ood_dsvdd.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_dsvdd.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--network.pretrained True \ +--network.checkpoint 'results/cifar100_resnet18_32x32_dsvdd_e100/best.ckpt' diff --git a/OpenOOD/scripts/ad/dsvdd/cifar100_train_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/cifar100_train_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..c7b2ee6e238e0e82b3b2c8f93ac6ef1b2b5da081 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/cifar100_train_dsvdd.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/cifar100_train_dsvdd.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/pipelines/train/train_dsvdd.yml \ +configs/networks/resnet18_32x32.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--optimizer.num_epochs 100 \ +--network.pretrained True \ +--network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1/best.ckpt' diff --git a/OpenOOD/scripts/ad/dsvdd/cifar10_test_ood_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/cifar10_test_ood_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..30d4e52c4b0bbdd9a169fe4d2e878f29c1204d8e --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/cifar10_test_ood_dsvdd.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/cifar10_test_ood_dsvdd.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_dsvdd.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--network.pretrained True \ +--network.checkpoint 'results/cifar10_resnet18_32x32_dsvdd_e2/best.ckpt' diff --git a/OpenOOD/scripts/ad/dsvdd/cifar10_train_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/cifar10_train_dsvdd.sh new file mode 100644 index 
0000000000000000000000000000000000000000..f666696100371ffe4f6516113f77abeff87ef3ba --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/cifar10_train_dsvdd.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/cifar10_train_dsvdd.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +-w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/pipelines/train/train_dsvdd.yml \ +configs/networks/resnet18_32x32.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--optimizer.num_epochs 100 \ +--network.pretrained True \ +--network.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt' diff --git a/OpenOOD/scripts/ad/dsvdd/mnist_test_osr_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/mnist_test_osr_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..7beca01bcc63ae57f452719536d253da895976f2 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/mnist_test_osr_dsvdd.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/mnist_test_osr_dsvdd.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_dsvdd.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--network.pretrained True \ +--network.checkpoint 'results/osr_mnist6_seed1_lenet_dsvdd_e100/best.ckpt' \ +--evaluator.name osr diff --git a/OpenOOD/scripts/ad/dsvdd/mnist_train_osr_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/mnist_train_osr_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..03063f65ad60badeae6f9a59a55cfc85416a4f8b --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/mnist_train_osr_dsvdd.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/mnist_train_osr_dsvdd.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/train/train_dsvdd.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet.ckpt' \ +--optimizer.num_epochs 100 diff --git a/OpenOOD/scripts/ad/dsvdd/osr_cifar50_train_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/osr_cifar50_train_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..a4131afd9366d6a37d9ee457e83ccaec623ce786 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/osr_cifar50_train_dsvdd.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/osr_cifar50_train_dsvdd.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar50/cifar50_seed1.yml \ +configs/datasets/osr_cifar50/cifar50_seed1_ood.yml
\ +configs/pipelines/train/train_dsvdd.yml \ +configs/networks/resnet18_32x32.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--optimizer.num_epochs 100 \ +--network.pretrained True \ +--network.checkpoint 'results/checkpoints/osr/cifar50_seed1_acc80.24.ckpt' & diff --git a/OpenOOD/scripts/ad/dsvdd/osr_cifar6_train_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/osr_cifar6_train_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..908af7e8e00173fc75cd7385f1cea4a6f6096334 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/osr_cifar6_train_dsvdd.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/osr_cifar6_train_dsvdd.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar6/cifar6_seed1.yml \ +configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \ +configs/pipelines/train/train_dsvdd.yml \ +configs/networks/resnet18_32x32.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--optimizer.num_epochs 100 \ +--network.pretrained True \ +--network.checkpoint 'results/checkpoints/osr/cifar6_seed1_acc97.57.ckpt' & diff --git a/OpenOOD/scripts/ad/dsvdd/osr_mnist6_train_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/osr_mnist6_train_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..908af7e8e00173fc75cd7385f1cea4a6f6096334 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/osr_mnist6_train_dsvdd.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/osr_mnist6_train_dsvdd.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/pipelines/train/train_dsvdd.yml \ +configs/networks/lenet.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--optimizer.num_epochs 100 \ +--network.pretrained True \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' & diff --git a/OpenOOD/scripts/ad/dsvdd/osr_tin20_train_dsvdd.sh b/OpenOOD/scripts/ad/dsvdd/osr_tin20_train_dsvdd.sh new file mode 100644 index 0000000000000000000000000000000000000000..e07ebe38c5fdbe7863254618087c25047ee72622 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/osr_tin20_train_dsvdd.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/dsvdd/osr_tin20_train_dsvdd.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_tin20/tin20_seed1.yml \ +configs/datasets/osr_tin20/tin20_seed1_ood.yml \ +configs/pipelines/train/train_dsvdd.yml \ +configs/networks/resnet18_32x32.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dsvdd.yml \ +--optimizer.num_epochs 100 \ +--network.pretrained True \ +--network.checkpoint 'results/checkpoints/osr/tin20_seed1_acc77.23.ckpt' \ +--merge_option merge &
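The sweep script that follows launches one job per (dataset, network, checkpoint) tuple by string-formatting a shell command and handing it to os.system. A rough shell equivalent of the same sweep, as a sketch that drops the srun cluster wrapper and assumes the checkpoints under results/checkpoints/osr/ exist:

#!/bin/bash
# Sketch only: mirrors the (dataset, network, checkpoint) tuples in
# sweep_osr.py below; the associative array pairs each OSR split with
# the backbone used for it elsewhere in these scripts.
declare -A net=(
  [osr_cifar6/cifar6_seed1]=resnet18_32x32
  [osr_cifar50/cifar50_seed1]=resnet18_32x32
  [osr_tin20/tin20_seed1]=resnet18_64x64
  [osr_mnist6/mnist6_seed1]=lenet
)
for split in "${!net[@]}"; do
  name=${split#*/}   # e.g. cifar6_seed1
  PYTHONPATH='.':$PYTHONPATH \
  python main.py \
  --config configs/datasets/${split}.yml \
  configs/datasets/${split}_ood.yml \
  configs/networks/${net[$split]}.yml \
  configs/pipelines/test/test_osr.yml \
  configs/preprocessors/base_preprocessor.yml \
  configs/postprocessors/temp_scaling.yml \
  --network.checkpoint results/checkpoints/osr/${name}.ckpt \
  --num_workers 8 \
  --merge_option merge
done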
diff --git a/OpenOOD/scripts/ad/dsvdd/sweep_osr.py b/OpenOOD/scripts/ad/dsvdd/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..d89f199a9e6dc4a2333d9749c643fa607f35b2d2 --- /dev/null +++ b/OpenOOD/scripts/ad/dsvdd/sweep_osr.py @@ -0,0 +1,46 @@ +# python scripts/ad/dsvdd/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], + [ + 'mnist/mnist.yml', 'mnist/mnist_ood.yml', 'lenet', + 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/temp_scaling.yml \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ad/kdad/kdad_test.sh b/OpenOOD/scripts/ad/kdad/kdad_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..4f42dc4798fd6c2a601f0423181b4eac971eb01e --- /dev/null +++ b/OpenOOD/scripts/ad/kdad/kdad_test.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ad/kdad/kdad_test.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/KDAD/kdad_cifar10.yml \ +configs/datasets/KDAD/kdad_cifar10_ood.yml \ +configs/pipelines/test/test_kdad.yml \ +configs/networks/kdad_vgg.yml \ +configs/preprocessors/base_preprocessor.yml diff --git a/OpenOOD/scripts/ad/kdad/kdad_train.sh b/OpenOOD/scripts/ad/kdad/kdad_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..ba58f0aacc269a3f876118fa7d8e598b1e5415f3 --- /dev/null +++ b/OpenOOD/scripts/ad/kdad/kdad_train.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ad/kdad/kdad_train.sh + + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/KDAD/kdad_cifar10.yml \ +configs/datasets/KDAD/kdad_cifar10_ood.yml \ +configs/pipelines/train/train_kdad.yml \ +configs/networks/kdad_vgg.yml \ +configs/preprocessors/base_preprocessor.yml diff --git a/OpenOOD/scripts/ad/patchcore/bottle_test_ood_patchcore.sh b/OpenOOD/scripts/ad/patchcore/bottle_test_ood_patchcore.sh new file mode 100644 index 0000000000000000000000000000000000000000..6e46059bacb1ac0828df9c27c95552917b814ab2 --- /dev/null +++ b/OpenOOD/scripts/ad/patchcore/bottle_test_ood_patchcore.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh
scripts/ad/patchcore/bottle_test_ood_patchcore.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/patchcore_net.yml \ +configs/pipelines/test/test_patchcore.yml \ +configs/postprocessors/patch.yml \ +configs/preprocessors/base_preprocessor.yml \ +--evaluator.name ad \ +--num_workers 8 \ +--merge_option merge diff --git a/OpenOOD/scripts/ad/patchcore/cifar100_test_ood_patchcore.sh b/OpenOOD/scripts/ad/patchcore/cifar100_test_ood_patchcore.sh new file mode 100644 index 0000000000000000000000000000000000000000..3dafbea6ba6ea2b6defb3c73b558714ba9daccea --- /dev/null +++ b/OpenOOD/scripts/ad/patchcore/cifar100_test_ood_patchcore.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ad/patchcore/cifar100_test_ood_patchcore.sh + +GPU=1 +CPU=1 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/patchcore_net.yml \ +configs/pipelines/test/test_patchcore.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/patch.yml \ +--num_workers 8 \ +--merge_option merge diff --git a/OpenOOD/scripts/ad/patchcore/cifar10_test_ood_patchcore.sh b/OpenOOD/scripts/ad/patchcore/cifar10_test_ood_patchcore.sh new file mode 100644 index 0000000000000000000000000000000000000000..23086a7c2184de058be992d7560a2f98eb4f43dd --- /dev/null +++ b/OpenOOD/scripts/ad/patchcore/cifar10_test_ood_patchcore.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ad/patchcore/cifar10_test_ood_patchcore.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/patchcore_net.yml \ +configs/pipelines/test/test_patchcore.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/patch.yml \ +--num_workers 8 \ +--merge_option merge diff --git a/OpenOOD/scripts/ad/patchcore/osr_cifar50_test_ood_patchcore.sh b/OpenOOD/scripts/ad/patchcore/osr_cifar50_test_ood_patchcore.sh new file mode 100644 index 0000000000000000000000000000000000000000..ad792220911a61311c91f0d2a37d923401cfb74e --- /dev/null +++ b/OpenOOD/scripts/ad/patchcore/osr_cifar50_test_ood_patchcore.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/patchcore/osr_cifar50_test_ood_patchcore.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar50/cifar50_seed1.yml \ +configs/datasets/osr_cifar50/cifar50_seed1_ood.yml \ +configs/networks/patchcore_net.yml \ +configs/pipelines/test/test_patchcore.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/patch.yml \ +--network.backbone.name resnet18_32x32 \ +--network.backbone.checkpoint 'results/checkpoints/osr/cifar50_seed1_acc80.24.ckpt' \ +--num_workers 8 \ +--merge_option merge & diff --git a/OpenOOD/scripts/ad/patchcore/osr_cifar6_test_ood_patchcore.sh b/OpenOOD/scripts/ad/patchcore/osr_cifar6_test_ood_patchcore.sh new file mode 100644 index
0000000000000000000000000000000000000000..b2a229ea79a578921c5f188a0c4cbfdc95e5ae2c --- /dev/null +++ b/OpenOOD/scripts/ad/patchcore/osr_cifar6_test_ood_patchcore.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/patchcore/osr_cifar6_test_ood_patchcore.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar6/cifar6_seed1.yml \ +configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \ +configs/networks/patchcore_net.yml \ +configs/pipelines/test/test_patchcore.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/patch.yml \ +--network.backbone.name resnet18_32x32 \ +--network.backbone.checkpoint 'results/checkpoints/osr/cifar6_seed1_acc97.57.ckpt' \ +--num_workers 8 \ +--merge_option merge & diff --git a/OpenOOD/scripts/ad/patchcore/osr_tin20_test_ood_patchcore.sh b/OpenOOD/scripts/ad/patchcore/osr_tin20_test_ood_patchcore.sh new file mode 100644 index 0000000000000000000000000000000000000000..d2f860bab4ff7c50390764bc823b46bd54e42ce3 --- /dev/null +++ b/OpenOOD/scripts/ad/patchcore/osr_tin20_test_ood_patchcore.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ad/patchcore/osr_tin20_test_ood_patchcore.sh + +GPU=1 +CPU=1 +node=30 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_tin20/tin20_seed1.yml \ +configs/datasets/osr_tin20/tin20_seed1_ood.yml \ +configs/networks/patchcore_net.yml \ +configs/pipelines/test/test_patchcore.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/patch.yml \ +--network.backbone.name resnet18_64x64 \ +--network.backbone.checkpoint 'results/checkpoints/osr/tin20_seed1_acc77.23.ckpt' \ +--num_workers 8 \ +--merge_option merge & diff --git a/OpenOOD/scripts/ad/rd4ad/cifar10_test.sh b/OpenOOD/scripts/ad/rd4ad/cifar10_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..a3f6d000eb9d63c2f3741e3a4f0364a88ad5068e --- /dev/null +++ b/OpenOOD/scripts/ad/rd4ad/cifar10_test.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# sh scripts/ad/rd4ad/cifar10_test.sh + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +#--config configs/datasets/mvtec/cable.yml \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/rd4ad_net.yml \ +configs/pipelines/test/test_rd4ad.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/rd4ad.yml
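The RD4AD test script above and the training script that follows form the usual train-then-test pair (training runs first in practice); a typical session, as a sketch assuming the test pipeline picks up the checkpoint written by the training run:

#!/bin/bash
# Sketch: RD4AD training followed by OOD evaluation on CIFAR-10.
sh scripts/ad/rd4ad/cifar10_train.sh && \
sh scripts/ad/rd4ad/cifar10_test.sh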
diff --git a/OpenOOD/scripts/ad/rd4ad/cifar10_train.sh b/OpenOOD/scripts/ad/rd4ad/cifar10_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..cd4830ddbf174dbefa8c382a6750a2ff66e37c63 --- /dev/null +++ b/OpenOOD/scripts/ad/rd4ad/cifar10_train.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ad/rd4ad/cifar10_train.sh + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/pipelines/train/train_rd4ad.yml \ +configs/networks/rd4ad_net.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/rd4ad.yml diff --git a/OpenOOD/scripts/basics/cifar10/train_cifar10.sh b/OpenOOD/scripts/basics/cifar10/train_cifar10.sh new file mode 100644 index 0000000000000000000000000000000000000000..33c247340b7461dcabca6689e0d815bbbe710ed9 --- /dev/null +++ b/OpenOOD/scripts/basics/cifar10/train_cifar10.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/basics/cifar10/train_cifar10.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + --seed 0 diff --git a/OpenOOD/scripts/basics/cifar10/train_cifar10_dist.sh b/OpenOOD/scripts/basics/cifar10/train_cifar10_dist.sh new file mode 100644 index 0000000000000000000000000000000000000000..1c89f6c3947075c92c67a1ffcb74199028ae30ed --- /dev/null +++ b/OpenOOD/scripts/basics/cifar10/train_cifar10_dist.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/basics/cifar10/train_cifar10_dist.sh + +GPU=1 +CPU=1 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/train/baseline.yml \ +--dataset.image_size 32 \ +--optimizer.num_epochs 100 \ +--num_workers 8 \ +--num_gpus 2 \ +--num_machines 1 \ +--machine_rank 0 \ +--mark 0 & diff --git a/OpenOOD/scripts/basics/cifar100/train_cifar100.sh b/OpenOOD/scripts/basics/cifar100/train_cifar100.sh new file mode 100644 index 0000000000000000000000000000000000000000..44321ab40e8d0569f1b9a5a0d4b60456e8b7138f --- /dev/null +++ b/OpenOOD/scripts/basics/cifar100/train_cifar100.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/basics/cifar100/train_cifar100.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +-w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + --seed 0 diff --git a/OpenOOD/scripts/basics/covid/train_covid.sh b/OpenOOD/scripts/basics/covid/train_covid.sh new file mode 100644 index 0000000000000000000000000000000000000000..2c4fe748302a7d1addd314eed79b689f1747733b --- /dev/null +++ b/OpenOOD/scripts/basics/covid/train_covid.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/basics/covid/train_covid.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +-w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/covid/covid.yml \ +configs/networks/resnet18_224x224.yml \ +configs/pipelines/train/baseline.yml \ +--optimizer.num_epochs 200 \ +--optimizer.lr 0.0001 \ +--optimizer.weight_decay 0.0005 \ +--num_workers 8
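train_cifar10_dist.sh above exposes the multi-process flags (--num_gpus, --num_machines, --machine_rank). A sketch of a two-machine launch, under the assumption that every node runs the same command and only the rank differs:

#!/bin/bash
# Sketch: 2 GPUs per node across 2 machines; run with --machine_rank 0
# on the first node and --machine_rank 1 on the second.
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/baseline.yml \
--num_gpus 2 \
--num_machines 2 \
--machine_rank 0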
diff --git a/OpenOOD/scripts/basics/imagenet/test_imagenet.sh b/OpenOOD/scripts/basics/imagenet/test_imagenet.sh new file mode 100644 index 0000000000000000000000000000000000000000..37cbc608c780dd2c520298f5ccfa51120f90f807 --- /dev/null +++ b/OpenOOD/scripts/basics/imagenet/test_imagenet.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/basics/imagenet/test_imagenet.sh + +GPU=1 +CPU=1 +node=76 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/imagenet/imagenet.yml \ +configs/networks/resnet50.yml \ +configs/pipelines/test/test_acc.yml \ +configs/preprocessors/base_preprocessor.yml \ +--num_workers 20 \ +--dataset.test.batch_size 512 \ +--dataset.val.batch_size 512 \ +--network.pretrained True \ +--network.checkpoint "./results/checkpoints/imagenet_res50_acc76.10.pth" \ +--save_output True \ +--num_gpus 1 diff --git a/OpenOOD/scripts/basics/imagenet200/train_imagenet200.sh b/OpenOOD/scripts/basics/imagenet200/train_imagenet200.sh new file mode 100644 index 0000000000000000000000000000000000000000..d04091e136c5a54bc564465253e410e9803649b7 --- /dev/null +++ b/OpenOOD/scripts/basics/imagenet200/train_imagenet200.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# sh scripts/basics/imagenet200/train_imagenet200.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/baseline.yml \ + --seed 0 diff --git a/OpenOOD/scripts/basics/mnist/test_mnist.sh b/OpenOOD/scripts/basics/mnist/test_mnist.sh new file mode 100644 index 0000000000000000000000000000000000000000..ff460b9bfc4ec06368bdd66314734a9120a3d8aa --- /dev/null +++ b/OpenOOD/scripts/basics/mnist/test_mnist.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/basics/mnist/test_mnist.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_acc.yml \ +--network.checkpoint ./results/checkpoints/mnist_lenet.ckpt diff --git a/OpenOOD/scripts/basics/mnist/train_mnist.sh b/OpenOOD/scripts/basics/mnist/train_mnist.sh new file mode 100644 index 0000000000000000000000000000000000000000..4120f359fd883e05444e230973143dcdc058f84e --- /dev/null +++ b/OpenOOD/scripts/basics/mnist/train_mnist.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/basics/mnist/train_mnist.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/networks/lenet.yml \ +configs/pipelines/train/baseline.yml diff --git a/OpenOOD/scripts/basics/osr_cifar50/train_cifar50.sh b/OpenOOD/scripts/basics/osr_cifar50/train_cifar50.sh new file mode 100644 index 0000000000000000000000000000000000000000..69502cbd991392f739f98f20d175ba097bdaf32e --- /dev/null +++ b/OpenOOD/scripts/basics/osr_cifar50/train_cifar50.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh
scripts/basics/osr_cifar50/train_cifar50.sh + +GPU=1 +CPU=1 +node=66 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar50/cifar50_seed1.yml \ +configs/networks/resnet18_32x32.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/pipelines/train/baseline.yml \ +--network.pretrained False \ +--dataset.image_size 32 \ +--optimizer.num_epochs 100 \ +--num_workers 4 \ +--mark 4 & diff --git a/OpenOOD/scripts/basics/osr_cifar6/osr_cifar6_test_msp.sh b/OpenOOD/scripts/basics/osr_cifar6/osr_cifar6_test_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..71d38c3e315696e581fc67cdae1ed079b4d2016c --- /dev/null +++ b/OpenOOD/scripts/basics/osr_cifar6/osr_cifar6_test_msp.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/basics/osr_cifar6/osr_cifar6_test_msp.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/osr_cifar6/cifar6_seed1.yml \ +configs/datasets/osr_cifar6/cifar6_seed1_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint './results/cifar6_seed1_resnet18_32x32_base_e100_lr0.1_default/best.ckpt' diff --git a/OpenOOD/scripts/basics/osr_cifar6/train_cifar6.sh b/OpenOOD/scripts/basics/osr_cifar6/train_cifar6.sh new file mode 100644 index 0000000000000000000000000000000000000000..889908bbaee747b348bd3205621033136de6c217 --- /dev/null +++ b/OpenOOD/scripts/basics/osr_cifar6/train_cifar6.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/basics/osr_cifar6/train_cifar6.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_cifar6/cifar6_seed5.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/train/baseline.yml & diff --git a/OpenOOD/scripts/basics/osr_mnist6/train_mnist6.sh b/OpenOOD/scripts/basics/osr_mnist6/train_mnist6.sh new file mode 100644 index 0000000000000000000000000000000000000000..e92af9230a73f98892e0bd5857bb332e302c86b0 --- /dev/null +++ b/OpenOOD/scripts/basics/osr_mnist6/train_mnist6.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# sh scripts/basics/osr_mnist6/train_mnist6.sh + +GPU=1 +CPU=1 +node=78 +jobname=openood + +if [ $USER == "jkyang" ]; then + PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ + --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ + --kill-on-bad-exit=1 --job-name=${jobname} \ + python main.py \ + --config configs/datasets/osr_mnist6/mnist6_seed2.yml \ + configs/networks/lenet.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/train/baseline.yml \ + --network.pretrained False +else + PYTHONPATH='.':$PYTHONPATH \ + python main.py \ + --config configs/datasets/osr_mnist6/mnist6_seed1.yml \ + configs/networks/lenet.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/train/baseline.yml \ + --network.pretrained False \ +
--dataset.image_size 28 \ + --optimizer.num_epochs 100 \ + --num_workers 4 +fi + +cp ./results/mnist6_seed1_lenet_base_e100_lr0.1_default/best.ckpt ./results/checkpoints/osr/mnist6_seed1.ckpt diff --git a/OpenOOD/scripts/basics/osr_tin20/train_tin20.sh b/OpenOOD/scripts/basics/osr_tin20/train_tin20.sh new file mode 100644 index 0000000000000000000000000000000000000000..dfebbef114e235735686fff7ed75e2339d591f90 --- /dev/null +++ b/OpenOOD/scripts/basics/osr_tin20/train_tin20.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/basics/osr_tin20/train_tin20.sh +# python -m pdb -c continue main.py \ + +GPU=1 +CPU=1 +node=75 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_tin20/tin20_seed1.yml \ +configs/networks/resnet18_64x64.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/pipelines/train/baseline.yml \ +--network.pretrained False \ +--dataset.image_size 64 \ +--optimizer.num_epochs 100 \ +--num_workers 4 \ +--mark 5 & diff --git a/OpenOOD/scripts/download/download.py b/OpenOOD/scripts/download/download.py new file mode 100644 index 0000000000000000000000000000000000000000..e59f51bcff5822d8afd3e3517a81c8832f9daf53 --- /dev/null +++ b/OpenOOD/scripts/download/download.py @@ -0,0 +1,226 @@ +import argparse +import os +import zipfile + +import gdown + +benchmarks_dict = { + 'bimcv': [ + 'bimcv', 'ct', 'xraybone', 'actmed', 'mnist', 'cifar10', 'texture', + 'tin' + ], + 'mnist': [ + 'mnist', 'notmnist', 'fashionmnist', 'texture', 'cifar10', 'tin', + 'places365', 'cinic10' + ], + 'cifar-10': [ + 'cifar10', 'cifar100', 'tin', 'mnist', 'svhn', 'texture', 'places365', + 'tin597' + ], + 'cifar-100': + ['cifar100', 'cifar10', 'tin', 'svhn', 'texture', 'places365', 'tin597'], + 'imagenet-200': [ + 'imagenet_1k', 'ssb_hard', 'ninco', 'inaturalist', 'texture', + 'openimage_o', 'imagenet_v2', 'imagenet_c', 'imagenet_r' + ], + 'imagenet-1k': [ + 'imagenet_1k', 'ssb_hard', 'ninco', 'inaturalist', 'texture', + 'openimage_o', 'imagenet_v2', 'imagenet_c', 'imagenet_r' + ], + 'misc': [ + 'cifar10c', + 'fractals_and_fvis', + 'usps', + 'imagenet10', + 'hannover', + # 'imagenet200_cae', 'imagenet200_edsr', 'imagenet200_stylized' + ], +} + +dir_dict = { + 'images_classic/': [ + 'cifar100', 'tin', 'tin597', 'svhn', 'cinic10', 'imagenet10', 'mnist', + 'fashionmnist', 'cifar10', 'cifar100c', 'places365', 'cifar10c', + 'fractals_and_fvis', 'usps', 'texture', 'notmnist' + ], + 'images_largescale/': [ + 'imagenet_1k', + 'species_sub', + 'ssb_hard', + 'ninco', + 'inaturalist', + 'places', + 'sun', + 'openimage_o', + 'imagenet_v2', + 'imagenet_c', + 'imagenet_r', + # 'imagenet200_cae', 'imagenet200_edsr', 'imagenet200_stylized' + ], + 'images_medical/': ['actmed', 'bimcv', 'ct', 'hannover', 'xraybone'], +} + +download_id_dict = { + 'osr': '1L9MpK9QZq-o-JrFHrfo5lM4-FsFPk0e9', + 'mnist_lenet': '13mEvYF9rVIuch8u0RVDLf_JMOk3PAYCj', + 'cifar10_res18': '1rPEScK7TFjBn_W_frO-8RSPwIG6_x0fJ', + 'cifar100_res18': '1OOf88A48yXFw4fSU02XQT-3OQKf31Csy', + 'imagenet_res50': '1tgY_PsfkazLDyI1pniDMDEehntBhFyF3', + 'cifar10_res18_v1.5': '1byGeYxM_PlLjT72wZsMQvP6popJeWBgt', + 'cifar100_res18_v1.5': '1s-1oNrRtmA0pGefxXJOUVRYpaoAML0C-', + 'imagenet200_res18_v1.5': '1ddVmwc8zmzSjdLUO84EuV4Gz1c7vhIAs', + 'imagenet_res50_v1.5': '15PdDMNRfnJ7f2oxW6lI-Ge4QJJH3Z0Fy', + 'benchmark_imglist': '1XKzBdWCqg3vPoj-D32YixJyJJ0hL63gP', + 'usps':
'1KhbWhlFlpFjEIb4wpvW0s9jmXXsHonVl', + 'cifar100': '1PGKheHUsf29leJPPGuXqzLBMwl8qMF8_', + 'cifar10': '1Co32RiiWe16lTaiOU6JMMnyUYS41IlO1', + 'cifar10c': '170DU_ficWWmbh6O2wqELxK9jxRiGhlJH', + 'cinic10': '190gdcfbvSGbrRK6ZVlJgg5BqqED6H_nn', + 'svhn': '1DQfc11HOtB1nEwqS4pWUFp8vtQ3DczvI', + 'fashionmnist': '1nVObxjUBmVpZ6M0PPlcspsMMYHidUMfa', + 'cifar100c': '1MnETiQh9RTxJin2EHeSoIAJA28FRonHx', + 'mnist': '1CCHAGWqA1KJTFFswuF9cbhmB-j98Y1Sb', + 'fractals_and_fvis': '1EZP8RGOP-XbMsKex3r-BGI5F1WAP_PJ3', + 'tin': '1PZ-ixyx52U989IKsMA2OT-24fToTrelC', + 'tin597': '1R0d8zBcUxWNXz6CPXanobniiIfQbpKzn', + 'texture': '1OSz1m3hHfVWbRdmMwKbUzoU8Hg9UKcam', + 'imagenet10': '1qRKp-HCLkmfiWwR-PXthN7-2dxIQVKxP', + 'notmnist': '16ueghlyzunbksnc_ccPgEAloRW9pKO-K', + 'places365': '1Ec-LRSTf6u5vEctKX9vRp9OA6tqnJ0Ay', + 'places': '1fZ8TbPC4JGqUCm-VtvrmkYxqRNp2PoB3', + 'sun': '1ISK0STxWzWmg-_uUr4RQ8GSLFW7TZiKp', + 'species_sub': '1-JCxDx__iFMExkYRMylnGJYTPvyuX6aq', + 'imagenet_1k': '1i1ipLDFARR-JZ9argXd2-0a6DXwVhXEj', + 'ssb_hard': '1PzkA-WGG8Z18h0ooL_pDdz9cO-DCIouE', + 'ninco': '1Z82cmvIB0eghTehxOGP5VTdLt7OD3nk6', + 'imagenet_v2': '1akg2IiE22HcbvTBpwXQoD7tgfPCdkoho', + 'imagenet_r': '1EzjMN2gq-bVV7lg-MEAdeuBuz-7jbGYU', + 'imagenet_c': '1JeXL9YH4BO8gCJ631c5BHbaSsl-lekHt', + 'imagenet_o': '1S9cFV7fGvJCcka220-pIO9JPZL1p1V8w', + 'openimage_o': '1VUFXnB_z70uHfdgJG2E_pjYOcEgqM7tE', + 'inaturalist': '1zfLfMvoUD0CUlKNnkk7LgxZZBnTBipdj', + 'actmed': '1tibxL_wt6b3BjliPaQ2qjH54Wo4ZXWYb', + 'ct': '1k5OYN4inaGgivJBJ5L8pHlopQSVnhQ36', + 'hannover': '1NmqBDlcA1dZQKOvgcILG0U1Tm6RP0s2N', + 'xraybone': '1ZzO3y1-V_IeksJXEvEfBYKRoQLLvPYe9', + 'bimcv': '1nAA45V6e0s5FAq2BJsj9QH5omoihb7MZ', +} + + +def require_download(filename, path): + for item in os.listdir(path): + if item.startswith(filename) or filename.startswith( + item) or path.endswith(filename): + return False + + else: + print(filename + ' needs download:') + return True + + +def download_dataset(dataset, args): + for key in dir_dict.keys(): + if dataset in dir_dict[key]: + store_path = os.path.join(args.save_dir[0], key, dataset) + if not os.path.exists(store_path): + os.makedirs(store_path) + break + else: + print('Invalid dataset detected {}'.format(dataset)) + return + + if require_download(dataset, store_path): + print(store_path) + if not store_path.endswith('/'): + store_path = store_path + '/' + gdown.download(id=download_id_dict[dataset], output=store_path) + + file_path = os.path.join(store_path, dataset + '.zip') + with zipfile.ZipFile(file_path, 'r') as zip_file: + zip_file.extractall(store_path) + os.remove(file_path) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Download datasets and checkpoints') + parser.add_argument('--contents', + nargs='+', + default=['datasets', 'checkpoints']) + parser.add_argument('--datasets', nargs='+', default=['default']) + parser.add_argument('--checkpoints', nargs='+', default=['all']) + parser.add_argument('--save_dir', + nargs='+', + default=['./data', './results']) + parser.add_argument('--dataset_mode', default='default') + args = parser.parse_args() + + if args.datasets[0] == 'default': + args.datasets = ['mnist', 'cifar-10', 'cifar-100'] + elif args.datasets[0] == 'ood_v1.5': + args.datasets = [ + 'cifar-10', 'cifar-100', 'imagenet-200', 'imagenet-1k' + ] + elif args.datasets[0] == 'all': + args.datasets = list(benchmarks_dict.keys()) + + if args.checkpoints[0] == 'ood': + args.checkpoints = [ + 'mnist_lenet', 'cifar10_res18', 'cifar100_res18', 'imagenet_res50' + ] + elif args.checkpoints[0] 
== 'ood_v1.5': + args.checkpoints = [ + 'cifar10_res18_v1.5', 'cifar100_res18_v1.5', + 'imagenet200_res18_v1.5', 'imagenet_res50_v1.5' + ] + elif args.checkpoints[0] == 'all': + args.checkpoints = [ + 'mnist_lenet', 'cifar10_res18', 'cifar100_res18', 'imagenet_res50', + 'osr' + ] + + for content in args.contents: + if content == 'datasets': + + store_path = args.save_dir[0] + if not store_path.endswith('/'): + store_path = store_path + '/' + if not os.path.exists(os.path.join(store_path, + 'benchmark_imglist')): + gdown.download(id=download_id_dict['benchmark_imglist'], + output=store_path) + file_path = os.path.join(args.save_dir[0], + 'benchmark_imglist.zip') + with zipfile.ZipFile(file_path, 'r') as zip_file: + zip_file.extractall(store_path) + os.remove(file_path) + + if args.dataset_mode == 'default' or \ + args.dataset_mode == 'benchmark': + for benchmark in args.datasets: + for dataset in benchmarks_dict[benchmark]: + download_dataset(dataset, args) + + if args.dataset_mode == 'dataset': + for dataset in args.datasets: + download_dataset(dataset, args) + + elif content == 'checkpoints': + if 'v1.5' in args.checkpoints[0]: + store_path = args.save_dir[1] + else: + store_path = os.path.join(args.save_dir[1], 'checkpoints/') + if not os.path.exists(store_path): + os.makedirs(store_path) + + if not store_path.endswith('/'): + store_path = store_path + '/' + + for checkpoint in args.checkpoints: + if require_download(checkpoint, store_path): + gdown.download(id=download_id_dict[checkpoint], + output=store_path) + file_path = os.path.join(store_path, checkpoint + '.zip') + with zipfile.ZipFile(file_path, 'r') as zip_file: + zip_file.extractall(store_path) + os.remove(file_path) diff --git a/OpenOOD/scripts/download/download.sh b/OpenOOD/scripts/download/download.sh new file mode 100644 index 0000000000000000000000000000000000000000..bcddc599fa37d43eb9ac35d59575ae976c39c329 --- /dev/null +++ b/OpenOOD/scripts/download/download.sh @@ -0,0 +1,10 @@ +# sh ./scripts/download/download.sh + +# download the up-to-date benchmarks and checkpoints +# provided by OpenOOD v1.5 +python ./scripts/download/download.py \ + --contents 'datasets' 'checkpoints' \ + --datasets 'ood_v1.5' \ + --checkpoints 'ood_v1.5' \ + --save_dir './data' './results' \ + --dataset_mode 'benchmark' diff --git a/OpenOOD/scripts/eval_ood_imagenet.py b/OpenOOD/scripts/eval_ood_imagenet.py new file mode 100644 index 0000000000000000000000000000000000000000..8c2c1c98b5fdc7464e1643bb4086b9e51b1dd972 --- /dev/null +++ b/OpenOOD/scripts/eval_ood_imagenet.py @@ -0,0 +1,188 @@ +import collections +import os, sys +ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..') +sys.path.append(ROOT_DIR) +import numpy as np +import pandas as pd +import argparse +import pickle + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torchvision.models import ResNet50_Weights, Swin_T_Weights, ViT_B_16_Weights, RegNet_Y_16GF_Weights +from torchvision import transforms as trn +from torch.hub import load_state_dict_from_url + +from openood.evaluation_api import Evaluator + +from openood.networks import ResNet50, Swin_T, ViT_B_16, RegNet_Y_16GF +from openood.networks.conf_branch_net import ConfBranchNet +from openood.networks.godin_net import GodinNet +from openood.networks.rot_net import RotNet +from openood.networks.cider_net import CIDERNet + + +def update(d, u): + for k, v in u.items(): + if isinstance(v, collections.abc.Mapping): + d[k] = update(d.get(k, {}), v) + else: + d[k] = v + return d + + +parser =
argparse.ArgumentParser() +parser.add_argument('--arch', + default='resnet50', + choices=['resnet50', 'swin-t', 'vit-b-16', 'regnet']) +parser.add_argument('--tvs-version', default=1, choices=[1, 2]) +parser.add_argument('--ckpt-path', default=None) +parser.add_argument('--tvs-pretrained', action='store_true') +parser.add_argument('--postprocessor', default='msp') +parser.add_argument('--save-csv', action='store_true') +parser.add_argument('--save-score', action='store_true') +parser.add_argument('--fsood', action='store_true') +parser.add_argument('--batch-size', default=2000, type=int) +args = parser.parse_args() + +if not args.tvs_pretrained: + assert args.ckpt_path is not None + root = '/'.join(args.ckpt_path.split('/')[:-1]) +else: + root = os.path.join( + ROOT_DIR, 'results', + f'imagenet_{args.arch}_tvsv{args.tvs_version}_base_default') + if not os.path.exists(root): + os.makedirs(root) + +# specify an implemented postprocessor +# 'openmax', 'msp', 'temp_scaling', 'odin'... +postprocessor_name = args.postprocessor +# load pre-setup postprocessor if exists +if os.path.isfile( + os.path.join(root, 'postprocessors', f'{postprocessor_name}.pkl')): + with open( + os.path.join(root, 'postprocessors', f'{postprocessor_name}.pkl'), + 'rb') as f: + postprocessor = pickle.load(f) +else: + postprocessor = None + +# assuming the model is either +# 1) torchvision pre-trained; or +# 2) a specified checkpoint +if args.tvs_pretrained: + if args.arch == 'resnet50': + net = ResNet50() + weights = eval(f'ResNet50_Weights.IMAGENET1K_V{args.tvs_version}') + net.load_state_dict(load_state_dict_from_url(weights.url)) + preprocessor = weights.transforms() + elif args.arch == 'swin-t': + net = Swin_T() + weights = eval(f'Swin_T_Weights.IMAGENET1K_V{args.tvs_version}') + net.load_state_dict(load_state_dict_from_url(weights.url)) + preprocessor = weights.transforms() + elif args.arch == 'vit-b-16': + net = ViT_B_16() + weights = eval(f'ViT_B_16_Weights.IMAGENET1K_V{args.tvs_version}') + net.load_state_dict(load_state_dict_from_url(weights.url)) + preprocessor = weights.transforms() + elif args.arch == 'regnet': + net = RegNet_Y_16GF() + weights = eval( + f'RegNet_Y_16GF_Weights.IMAGENET1K_SWAG_E2E_V{args.tvs_version}') + net.load_state_dict(load_state_dict_from_url(weights.url)) + preprocessor = weights.transforms() + else: + raise NotImplementedError +else: + if args.arch == 'resnet50': + if postprocessor_name == 'conf_branch': + net = ConfBranchNet(backbone=ResNet50(), num_classes=1000) + elif postprocessor_name == 'godin': + backbone = ResNet50() + net = GodinNet(backbone=backbone, + feature_size=backbone.feature_size, + num_classes=1000) + elif postprocessor_name == 'rotpred': + net = RotNet(backbone=ResNet50(), num_classes=1000) + elif postprocessor_name == 'cider': + net = CIDERNet(backbone=ResNet50(), + head='mlp', + feat_dim=128, + num_classes=1000) + else: + net = ResNet50() + + ckpt = torch.load(args.ckpt_path, map_location='cpu') + net.load_state_dict(ckpt) + preprocessor = trn.Compose([ + trn.Resize(256), + trn.CenterCrop(224), + trn.ToTensor(), + trn.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + else: + raise NotImplementedError + +net.cuda() +net.eval() +# a unified evaluator +evaluator = Evaluator( + net, + id_name='imagenet', # the target ID dataset + data_root=os.path.join(ROOT_DIR, 'data'), + config_root=os.path.join(ROOT_DIR, 'configs'), + preprocessor=preprocessor, # default preprocessing + postprocessor_name=postprocessor_name, + postprocessor=postprocessor, + 
batch_size=args. + batch_size, # for certain methods the results can be slightly affected by batch size + shuffle=True, + num_workers=8) + +# load pre-computed scores if exists +if os.path.isfile(os.path.join(root, 'scores', f'{postprocessor_name}.pkl')): + with open(os.path.join(root, 'scores', f'{postprocessor_name}.pkl'), + 'rb') as f: + scores = pickle.load(f) + update(evaluator.scores, scores) + print('Loaded pre-computed scores from file.') + +# save postprocessor for future reuse +if hasattr(evaluator.postprocessor, 'setup_flag' + ) or evaluator.postprocessor.hyperparam_search_done is True: + pp_save_root = os.path.join(root, 'postprocessors') + if not os.path.exists(pp_save_root): + os.makedirs(pp_save_root) + + if not os.path.isfile( + os.path.join(pp_save_root, f'{postprocessor_name}.pkl')): + with open(os.path.join(pp_save_root, f'{postprocessor_name}.pkl'), + 'wb') as f: + pickle.dump(evaluator.postprocessor, f, pickle.HIGHEST_PROTOCOL) + +# the metrics is a dataframe +metrics = evaluator.eval_ood(fsood=args.fsood) + +# saving and recording +if args.save_csv: + saving_root = os.path.join(root, 'ood' if not args.fsood else 'fsood') + if not os.path.exists(saving_root): + os.makedirs(saving_root) + + if not os.path.isfile( + os.path.join(saving_root, f'{postprocessor_name}.csv')): + metrics.to_csv(os.path.join(saving_root, f'{postprocessor_name}.csv'), + float_format='{:.2f}'.format) + +if args.save_score: + score_save_root = os.path.join(root, 'scores') + if not os.path.exists(score_save_root): + os.makedirs(score_save_root) + with open(os.path.join(score_save_root, f'{postprocessor_name}.pkl'), + 'wb') as f: + pickle.dump(evaluator.scores, f, pickle.HIGHEST_PROTOCOL) diff --git a/OpenOOD/scripts/eval_ood_imagenet_foundation_models.py b/OpenOOD/scripts/eval_ood_imagenet_foundation_models.py new file mode 100644 index 0000000000000000000000000000000000000000..5da205c79c3d26d79c677a45f0af4dc09b54c807 --- /dev/null +++ b/OpenOOD/scripts/eval_ood_imagenet_foundation_models.py @@ -0,0 +1,440 @@ +import collections +import os, sys +ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..') +sys.path.append(ROOT_DIR) +import numpy as np +import pandas as pd +import argparse +import pickle + +import clip +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import transforms as trn + +from openood.evaluation_api import Evaluator +from openood.networks import CLIPZeroshot + +# hard-coding imagenet classnames and templates +imagenet_classes = [ + 'tench', 'goldfish', 'great white shark', 'tiger shark', + 'hammerhead shark', 'electric ray', 'stingray', 'rooster', 'hen', + 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco', + 'indigo bunting', 'American robin', 'bulbul', 'jay', 'magpie', 'chickadee', + 'American dipper', 'kite (bird of prey)', 'bald eagle', 'vulture', + 'great grey owl', 'fire salamander', 'smooth newt', 'newt', + 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog', + 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle', + 'mud turtle', 'terrapin', 'box turtle', 'banded gecko', 'green iguana', + 'Carolina anole', 'desert grassland whiptail lizard', 'agama', + 'frilled-necked lizard', 'alligator lizard', 'Gila monster', + 'European green lizard', 'chameleon', 'Komodo dragon', 'Nile crocodile', + 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake', + 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake', + 'garter snake', 'water snake', 'vine snake', 'night snake', + 
'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba', + 'sea snake', 'Saharan horned viper', 'eastern diamondback rattlesnake', + 'sidewinder rattlesnake', 'trilobite', 'harvestman', 'scorpion', + 'yellow garden spider', 'barn spider', 'European garden spider', + 'southern black widow', 'tarantula', 'wolf spider', 'tick', 'centipede', + 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peafowl', + 'quail', 'partridge', 'african grey parrot', 'macaw', + 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', 'hornbill', + 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser', + 'goose', 'black swan', 'tusker', 'echidna', 'platypus', 'wallaby', 'koala', + 'wombat', 'jellyfish', 'sea anemone', 'brain coral', 'flatworm', + 'nematode', 'conch', 'snail', 'slug', 'sea slug', 'chiton', + 'chambered nautilus', 'Dungeness crab', 'rock crab', 'fiddler crab', + 'red king crab', 'American lobster', 'spiny lobster', 'crayfish', + 'hermit crab', 'isopod', 'white stork', 'black stork', 'spoonbill', + 'flamingo', 'little blue heron', 'great egret', 'bittern bird', + 'crane bird', 'limpkin', 'common gallinule', 'American coot', 'bustard', + 'ruddy turnstone', 'dunlin', 'common redshank', 'dowitcher', + 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale', + 'killer whale', 'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin', + 'Maltese', 'Pekingese', 'Shih Tzu', 'King Charles Spaniel', 'Papillon', + 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound', + 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound', + 'Treeing Walker Coonhound', 'English foxhound', 'Redbone Coonhound', + 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet', + 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki', + 'Scottish Deerhound', 'Weimaraner', 'Staffordshire Bull Terrier', + 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier', + 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier', + 'Norwich Terrier', 'Yorkshire Terrier', 'Wire Fox Terrier', + 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier', + 'Cairn Terrier', 'Australian Terrier', 'Dandie Dinmont Terrier', + 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer', + 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier', + 'Australian Silky Terrier', 'Soft-coated Wheaten Terrier', + 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever', + 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever', + 'Chesapeake Bay Retriever', 'German Shorthaired Pointer', 'Vizsla', + 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany dog', + 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel', + 'Cocker Spaniel', 'Sussex Spaniel', 'Irish Water Spaniel', 'Kuvasz', + 'Schipperke', 'Groenendael dog', 'Malinois', 'Briard', 'Australian Kelpie', + 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie', + 'Border Collie', 'Bouvier des Flandres dog', 'Rottweiler', + 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher', + 'Greater Swiss Mountain Dog', 'Bernese Mountain Dog', + 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff', + 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. 
Bernard', 'husky', + 'Alaskan Malamute', 'Siberian Husky', 'Dalmatian', 'Affenpinscher', + 'Basenji', 'pug', 'Leonberger', 'Newfoundland dog', 'Great Pyrenees dog', + 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'brussels griffon', + 'Pembroke Welsh Corgi', 'Cardigan Welsh Corgi', 'Toy Poodle', + 'Miniature Poodle', 'Standard Poodle', + 'Mexican hairless dog (xoloitzcuintli)', 'grey wolf', + 'Alaskan tundra wolf', 'red wolf or maned wolf', 'coyote', 'dingo', + 'dhole', 'African wild dog', 'hyena', 'red fox', 'kit fox', 'Arctic fox', + 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat', + 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar', + 'lion', 'tiger', 'cheetah', 'brown bear', 'American black bear', + 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle', + 'ladybug', 'ground beetle', 'longhorn beetle', 'leaf beetle', + 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', 'ant', + 'grasshopper', 'cricket insect', 'stick insect', 'cockroach', + 'praying mantis', 'cicada', 'leafhopper', 'lacewing', 'dragonfly', + 'damselfly', 'red admiral butterfly', 'ringlet butterfly', + 'monarch butterfly', 'small white butterfly', 'sulphur butterfly', + 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber', + 'cottontail rabbit', 'hare', 'Angora rabbit', 'hamster', 'porcupine', + 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel horse', + 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox', + 'water buffalo', 'bison', 'ram (adult male sheep)', 'bighorn sheep', + 'Alpine ibex', 'hartebeest', 'impala (antelope)', 'gazelle', + 'arabian camel', 'llama', 'weasel', 'mink', 'European polecat', + 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo', + 'three-toed sloth', 'orangutan', 'gorilla', 'chimpanzee', 'gibbon', + 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur', + 'black-and-white colobus', 'proboscis monkey', 'marmoset', + 'white-headed capuchin', 'howler monkey', 'titi monkey', + "Geoffroy's spider monkey", 'common squirrel monkey', 'ring-tailed lemur', + 'indri', 'Asian elephant', 'African bush elephant', 'red panda', + 'giant panda', 'snoek fish', 'eel', 'silver salmon', 'rock beauty fish', + 'clownfish', 'sturgeon', 'gar fish', 'lionfish', 'pufferfish', 'abacus', + 'abaya', 'academic gown', 'accordion', 'acoustic guitar', + 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance', + 'amphibious vehicle', 'analog clock', 'apiary', 'apron', 'trash can', + 'assault rifle', 'backpack', 'bakery', 'balance beam', 'balloon', + 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster / handrail', 'barbell', + 'barber chair', 'barbershop', 'barn', 'barometer', 'barrel', 'wheelbarrow', + 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap', + 'bath towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker', + 'military hat (bearskin or shako)', 'beer bottle', 'beer glass', + 'bell tower', 'baby bib', 'tandem bicycle', 'bikini', 'ring binder', + 'binoculars', 'birdhouse', 'boathouse', 'bobsleigh', 'bolo tie', + 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'hunting bow', + 'bow tie', 'brass memorial plaque', 'bra', 'breakwater', 'breastplate', + 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train', + 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe', + 'can opener', 'cardigan', 'car mirror', 'carousel', 'tool kit', + 'cardboard box / carton', 'car wheel', 'automated teller machine', + 'cassette', 'cassette player', 'castle', 
'catamaran', 'CD player', 'cello', + 'mobile phone', 'chain', 'chain-link fence', 'chain mail', 'chainsaw', + 'storage chest', 'chiffonier', 'bell or wind chime', 'china cabinet', + 'Christmas stocking', 'church', 'movie theater', 'cleaver', + 'cliff dwelling', 'cloak', 'clogs', 'cocktail shaker', 'coffee mug', + 'coffeemaker', 'spiral or coil', 'combination lock', 'computer keyboard', + 'candy store', 'container ship', 'convertible', 'corkscrew', 'cornet', + 'cowboy boot', 'cowboy hat', 'cradle', 'construction crane', + 'crash helmet', 'crate', 'infant bed', 'Crock Pot', 'croquet ball', + 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer', + 'rotary dial telephone', 'diaper', 'digital clock', 'digital watch', + 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock', + 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick', + 'dumbbell', 'Dutch oven', 'electric fan', 'electric guitar', + 'electric locomotive', 'entertainment center', 'envelope', + 'espresso machine', 'face powder', 'feather boa', 'filing cabinet', + 'fireboat', 'fire truck', 'fire screen', 'flagpole', 'flute', + 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen', + 'four-poster bed', 'freight car', 'French horn', 'frying pan', 'fur coat', + 'garbage truck', 'gas mask or respirator', 'gas pump', 'goblet', 'go-kart', + 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano', + 'greenhouse', 'radiator grille', 'grocery store', 'guillotine', + 'hair clip', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer', + 'hand-held computer', 'handkerchief', 'hard disk drive', 'harmonica', + 'harp', 'combine harvester', 'hatchet', 'holster', 'home theater', + 'honeycomb', 'hook', 'hoop skirt', 'gymnastic horizontal bar', + 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron', + 'carved pumpkin', 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'rickshaw', + 'joystick', 'kimono', 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade', + 'laptop computer', 'lawn mower', 'lens cap', 'letter opener', 'library', + 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick', + 'slip-on shoe', 'lotion', 'music speaker', 'loupe magnifying glass', + 'sawmill', 'magnetic compass', 'messenger bag', 'mailbox', 'tights', + 'one-piece bathing suit', 'manhole cover', 'maraca', 'marimba', 'mask', + 'matchstick', 'maypole', 'maze', 'measuring cup', 'medicine cabinet', + 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can', + 'minibus', 'miniskirt', 'minivan', 'missile', 'mitten', 'mixing bowl', + 'mobile home', 'ford model t', 'modem', 'monastery', 'monitor', 'moped', + 'mortar and pestle', 'graduation cap', 'mosque', 'mosquito net', 'vespa', + 'mountain bike', 'tent', 'computer mouse', 'mousetrap', 'moving van', + 'muzzle', 'metal nail', 'neck brace', 'necklace', 'baby pacifier', + 'notebook computer', 'obelisk', 'oboe', 'ocarina', 'odometer', + 'oil filter', 'pipe organ', 'oscilloscope', 'overskirt', 'bullock cart', + 'oxygen mask', 'product packet / packaging', 'paddle', 'paddle wheel', + 'padlock', 'paintbrush', 'pajamas', 'palace', 'pan flute', 'paper towel', + 'parachute', 'parallel bars', 'park bench', 'parking meter', + 'railroad car', 'patio', 'payphone', 'pedestal', 'pencil case', + 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum', + 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank', + 'pill bottle', 'pillow', 'ping-pong ball', 'pinwheel', 'pirate ship', + 'drink pitcher', 'block plane', 'planetarium', 'plastic 
bag', 'plate rack', + 'farm plow', 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho', + 'pool table', 'soda bottle', 'plant pot', "potter's wheel", 'power drill', + 'prayer rug', 'printer', 'prison', 'missile', 'projector', 'hockey puck', + 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket', + 'radiator', 'radio', 'radio telescope', 'rain barrel', + 'recreational vehicle', 'fishing casting reel', 'reflex camera', + 'refrigerator', 'remote control', 'restaurant', 'revolver', 'rifle', + 'rocking chair', 'rotisserie', 'eraser', 'rugby ball', + 'ruler measuring stick', 'sneaker', 'safe', 'safety pin', 'salt shaker', + 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale', + 'school bus', 'schooner', 'scoreboard', 'CRT monitor', 'screw', + 'screwdriver', 'seat belt', 'sewing machine', 'shield', 'shoe store', + 'shoji screen / room divider', 'shopping basket', 'shopping cart', + 'shovel', 'shower cap', 'shower curtain', 'ski', 'balaclava ski mask', + 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel', + 'snowmobile', 'snowplow', 'soap dispenser', 'soccer ball', 'sock', + 'solar thermal collector', 'sombrero', 'soup bowl', 'keyboard space bar', + 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web', + 'spindle', 'sports car', 'spotlight', 'stage', 'steam locomotive', + 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall', + 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa', + 'submarine', 'suit', 'sundial', 'sunglasses', 'sunglasses', 'sunscreen', + 'suspension bridge', 'mop', 'sweatshirt', 'swim trunks / shorts', 'swing', + 'electrical switch', 'syringe', 'table lamp', 'tank', 'tape player', + 'teapot', 'teddy bear', 'television', 'tennis ball', 'thatched roof', + 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof', + 'toaster', 'tobacco shop', 'toilet seat', 'torch', 'totem pole', + 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', 'tray', + 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch', + 'trolleybus', 'trombone', 'hot tub', 'turnstile', 'typewriter keyboard', + 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase', + 'vaulted or arched ceiling', 'velvet fabric', 'vending machine', + 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock', + 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine', + 'water bottle', 'water jug', 'water tower', 'whiskey jug', 'whistle', + 'hair wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle', + 'airplane wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence', + 'shipwreck', 'sailboat', 'yurt', 'website', 'comic book', 'crossword', + 'traffic or street sign', 'traffic light', 'dust jacket', 'menu', 'plate', + 'guacamole', 'consomme', 'hot pot', 'trifle', 'ice cream', 'popsicle', + 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog', + 'mashed potatoes', 'cabbage', 'broccoli', 'cauliflower', 'zucchini', + 'spaghetti squash', 'acorn squash', 'butternut squash', 'cucumber', + 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith apple', + 'strawberry', 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit', + 'cherimoya (custard apple)', 'pomegranate', 'hay', 'carbonara', + 'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito', + 'red wine', 'espresso', 'tea cup', 'eggnog', 'mountain', 'bubble', 'cliff', + 'coral reef', 'geyser', 'lakeshore', 'promontory', 'sandbar', 'beach', + 'valley', 'volcano', 
'baseball player', 'bridegroom', 'scuba diver', + 'rapeseed', 'daisy', "yellow lady's slipper", 'corn', 'acorn', 'rose hip', + 'horse chestnut seed', 'coral fungus', 'agaric', 'gyromitra', + 'stinkhorn mushroom', 'earth star fungus', 'hen of the woods mushroom', + 'bolete', 'corn cob', 'toilet paper' +] +imagenet_templates = [ + 'a bad photo of a {}.', + 'a photo of many {}.', + 'a sculpture of a {}.', + 'a photo of the hard to see {}.', + 'a low resolution photo of the {}.', + 'a rendering of a {}.', + 'graffiti of a {}.', + 'a bad photo of the {}.', + 'a cropped photo of the {}.', + 'a tattoo of a {}.', + 'the embroidered {}.', + 'a photo of a hard to see {}.', + 'a bright photo of a {}.', + 'a photo of a clean {}.', + 'a photo of a dirty {}.', + 'a dark photo of the {}.', + 'a drawing of a {}.', + 'a photo of my {}.', + 'the plastic {}.', + 'a photo of the cool {}.', + 'a close-up photo of a {}.', + 'a black and white photo of the {}.', + 'a painting of the {}.', + 'a painting of a {}.', + 'a pixelated photo of the {}.', + 'a sculpture of the {}.', + 'a bright photo of the {}.', + 'a cropped photo of a {}.', + 'a plastic {}.', + 'a photo of the dirty {}.', + 'a jpeg corrupted photo of a {}.', + 'a blurry photo of the {}.', + 'a photo of the {}.', + 'a good photo of the {}.', + 'a rendering of the {}.', + 'a {} in a video game.', + 'a photo of one {}.', + 'a doodle of a {}.', + 'a close-up photo of the {}.', + 'a photo of a {}.', + 'the origami {}.', + 'the {} in a video game.', + 'a sketch of a {}.', + 'a doodle of the {}.', + 'a origami {}.', + 'a low resolution photo of a {}.', + 'the toy {}.', + 'a rendition of the {}.', + 'a photo of the clean {}.', + 'a photo of a large {}.', + 'a rendition of a {}.', + 'a photo of a nice {}.', + 'a photo of a weird {}.', + 'a blurry photo of a {}.', + 'a cartoon {}.', + 'art of a {}.', + 'a sketch of the {}.', + 'a embroidered {}.', + 'a pixelated photo of a {}.', + 'itap of the {}.', + 'a jpeg corrupted photo of the {}.', + 'a good photo of a {}.', + 'a plushie {}.', + 'a photo of the nice {}.', + 'a photo of the small {}.', + 'a photo of the weird {}.', + 'the cartoon {}.', + 'art of the {}.', + 'a drawing of the {}.', + 'a photo of the large {}.', + 'a black and white photo of a {}.', + 'the plushie {}.', + 'a dark photo of a {}.', + 'itap of a {}.', + 'graffiti of the {}.', + 'a toy {}.', + 'itap of my {}.', + 'a photo of a cool {}.', + 'a photo of a small {}.', + 'a tattoo of the {}.', +] + + +def update(d, u): + for k, v in u.items(): + if isinstance(v, collections.abc.Mapping): + d[k] = update(d.get(k, {}), v) + else: + d[k] = v + return d + + +parser = argparse.ArgumentParser() +parser.add_argument('--model-type', default='clip', choices=['clip', 'dinov2']) +parser.add_argument('--arch', + default='ViT-B/16', + choices=clip.available_models() + + ['ViT-S/14', 'ViT-B/14', 'ViT-L/14']) +parser.add_argument('--postprocessor', default='msp') +parser.add_argument('--save-csv', action='store_true') +parser.add_argument('--save-score', action='store_true') +parser.add_argument('--fsood', action='store_true') +parser.add_argument('--batch-size', default=200, type=int) +args = parser.parse_args() + +root = os.path.join( + ROOT_DIR, 'results', + f"imagenet_{args.model_type}_{args.arch.replace('/', '-')}") +if not os.path.exists(root): + os.makedirs(root) + +# specify an implemented postprocessor +# 'openmax', 'msp', 'temp_scaling', 'odin'... 
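+# note: the lookup below reuses a pickled postprocessor saved by an earlier
+# run under <root>/postprocessors/<name>.pkl, so any expensive setup or
+# hyperparameter search only has to happen once per model. illustrative
+# example (hypothetical run, not upstream output): with `--model-type clip
+# --arch ViT-B/16 --postprocessor odin`, the file checked would be
+# results/imagenet_clip_ViT-B-16/postprocessors/odin.pkl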
+postprocessor_name = args.postprocessor
+# load the pre-setup postprocessor if one exists
+if os.path.isfile(
+        os.path.join(root, 'postprocessors', f'{postprocessor_name}.pkl')):
+    with open(
+            os.path.join(root, 'postprocessors', f'{postprocessor_name}.pkl'),
+            'rb') as f:
+        postprocessor = pickle.load(f)
+else:
+    postprocessor = None
+
+if args.model_type == 'clip':
+    net = CLIPZeroshot(classnames=imagenet_classes,
+                       templates=imagenet_templates,
+                       backbone=args.arch)
+    preprocessor = net.preprocess
+elif args.model_type == 'dinov2':
+    model_tag = args.arch.lower().replace('/', '').replace('-', '')
+    net = torch.hub.load('facebookresearch/dinov2', f'dinov2_{model_tag}_lc')
+
+    preprocessor = trn.Compose([
+        trn.Resize(256, interpolation=trn.InterpolationMode.BICUBIC),
+        trn.CenterCrop(224),
+        trn.ToTensor(),
+        trn.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+    ])
+
+net.cuda()
+net.eval()
+
+# a unified evaluator
+evaluator = Evaluator(
+    net,
+    id_name='imagenet',  # the target ID dataset
+    data_root=os.path.join(ROOT_DIR, 'data'),
+    config_root=os.path.join(ROOT_DIR, 'configs'),
+    preprocessor=preprocessor,  # default preprocessing
+    postprocessor_name=postprocessor_name,
+    postprocessor=postprocessor,
+    # for certain methods the results can be slightly affected by batch size
+    batch_size=args.batch_size,
+    shuffle=False,
+    num_workers=8)
+
+# load pre-computed scores if they exist
+if os.path.isfile(os.path.join(root, 'scores', f'{postprocessor_name}.pkl')):
+    with open(os.path.join(root, 'scores', f'{postprocessor_name}.pkl'),
+              'rb') as f:
+        scores = pickle.load(f)
+    update(evaluator.scores, scores)
+    print('Loaded pre-computed scores from file.')
+
+# save the postprocessor for future reuse
+if hasattr(evaluator.postprocessor, 'setup_flag') or \
+        evaluator.postprocessor.hyperparam_search_done is True:
+    pp_save_root = os.path.join(root, 'postprocessors')
+    if not os.path.exists(pp_save_root):
+        os.makedirs(pp_save_root)
+
+    if not os.path.isfile(
+            os.path.join(pp_save_root, f'{postprocessor_name}.pkl')):
+        with open(os.path.join(pp_save_root, f'{postprocessor_name}.pkl'),
+                  'wb') as f:
+            pickle.dump(evaluator.postprocessor, f, pickle.HIGHEST_PROTOCOL)
+
+# the metrics are returned as a dataframe
+metrics = evaluator.eval_ood(fsood=args.fsood)
+
+# saving and recording
+if args.save_csv:
+    saving_root = os.path.join(root, 'ood' if not args.fsood else 'fsood')
+    if not os.path.exists(saving_root):
+        os.makedirs(saving_root)
+
+    if not os.path.isfile(
+            os.path.join(saving_root, f'{postprocessor_name}.csv')):
+        metrics.to_csv(os.path.join(saving_root, f'{postprocessor_name}.csv'),
+                       float_format='{:.2f}'.format)
+
+if args.save_score:
+    score_save_root = os.path.join(root, 'scores')
+    if not os.path.exists(score_save_root):
+        os.makedirs(score_save_root)
+    with open(os.path.join(score_save_root, f'{postprocessor_name}.pkl'),
+              'wb') as f:
+        pickle.dump(evaluator.scores, f, pickle.HIGHEST_PROTOCOL)
diff --git a/OpenOOD/scripts/ood/ash/cifar100_test_ood_ash.sh b/OpenOOD/scripts/ood/ash/cifar100_test_ood_ash.sh
new file mode 100644
index 0000000000000000000000000000000000000000..972e35cbe7a314860980e75e0b4301c6342a16bf
--- /dev/null
+++ b/OpenOOD/scripts/ood/ash/cifar100_test_ood_ash.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# sh scripts/ood/ash/cifar100_test_ood_ash.sh
+
+# GPU=1
+# CPU=1
+# node=73
+# jobname=openood
+
+PYTHONPATH='.':$PYTHONPATH \
+# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
+# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
+# --kill-on-bad-exit=1 --job-name=${jobname} -w
SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ash.yml \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor ash \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/ash/cifar10_test_ood_ash.sh b/OpenOOD/scripts/ood/ash/cifar10_test_ood_ash.sh new file mode 100644 index 0000000000000000000000000000000000000000..dc3a18d1524cd3e66b2f15da0c6a536d1c382c17 --- /dev/null +++ b/OpenOOD/scripts/ood/ash/cifar10_test_ood_ash.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/ash/cifar10_test_ood_ash.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ash.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 1 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor ash \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/ash/imagenet200_test_ood_ash.sh b/OpenOOD/scripts/ood/ash/imagenet200_test_ood_ash.sh new file mode 100644 index 0000000000000000000000000000000000000000..96f6400fc13ee105392c2cd902eee105c5173739 --- /dev/null +++ b/OpenOOD/scripts/ood/ash/imagenet200_test_ood_ash.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/ash/imagenet200_test_ood_ash.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor ash \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor ash \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/ash/imagenet_test_ood_ash.sh b/OpenOOD/scripts/ood/ash/imagenet_test_ood_ash.sh new file mode 100644 index 0000000000000000000000000000000000000000..aabe8fbdb2eed962b5fd4e2c32157644eff1abd1 --- /dev/null +++ 
b/OpenOOD/scripts/ood/ash/imagenet_test_ood_ash.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/ash/imagenet_test_ood_ash.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ash.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor ash \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor ash \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/cider/cifar100_test_cider.sh b/OpenOOD/scripts/ood/cider/cifar100_test_cider.sh new file mode 100644 index 0000000000000000000000000000000000000000..bc49445bbdb2867b4225328832e65761dfe3f450 --- /dev/null +++ b/OpenOOD/scripts/ood/cider/cifar100_test_cider.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/cider/cifar100_test_cider.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_cider_net_cider_e100_lr0.5_protom0.5_default \ + --postprocessor cider \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/cider/cifar100_train_cider.sh b/OpenOOD/scripts/ood/cider/cifar100_train_cider.sh new file mode 100644 index 0000000000000000000000000000000000000000..643cc112a1c547845f8404375863d7a2273fa513 --- /dev/null +++ b/OpenOOD/scripts/ood/cider/cifar100_train_cider.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# sh scripts/ood/cider/cifar100_train_cider.sh + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/cider_net.yml \ + configs/pipelines/train/train_cider.yml \ + configs/preprocessors/base_preprocessor.yml \ + --preprocessor.name cider \ + --network.backbone.name resnet18_32x32 \ + --dataset.train.batch_size 512 \ + --trainer.trainer_args.proto_m 0.5 \ + --num_workers 8 \ + --optimizer.num_epochs 100 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/cider/cifar10_test_cider.sh b/OpenOOD/scripts/ood/cider/cifar10_test_cider.sh new file mode 100644 index 0000000000000000000000000000000000000000..d16ed9af5ea021ff73517e90c69568d4a8f9ef15 --- /dev/null +++ b/OpenOOD/scripts/ood/cider/cifar10_test_cider.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/cider/cifar10_test_cider.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# 
especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_cider_net_cider_e100_lr0.5_protom0.95_default \ + --postprocessor cider \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/cider/cifar10_train_cider.sh b/OpenOOD/scripts/ood/cider/cifar10_train_cider.sh new file mode 100644 index 0000000000000000000000000000000000000000..a7e6ad05bf814728bf4b02ba57a7bfcd9bc04c14 --- /dev/null +++ b/OpenOOD/scripts/ood/cider/cifar10_train_cider.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# sh scripts/ood/cider/cifar10_train_cider.sh + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/cider_net.yml \ + configs/pipelines/train/train_cider.yml \ + configs/preprocessors/base_preprocessor.yml \ + --preprocessor.name cider \ + --network.backbone.name resnet18_32x32 \ + --dataset.train.batch_size 512 \ + --trainer.trainer_args.proto_m 0.95 \ + --num_workers 8 \ + --optimizer.num_epochs 100 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/cider/imagenet200_test_cider.sh b/OpenOOD/scripts/ood/cider/imagenet200_test_cider.sh new file mode 100644 index 0000000000000000000000000000000000000000..ea2b9b56a487cc7b472f8f36ec7b1fcf6693aeab --- /dev/null +++ b/OpenOOD/scripts/ood/cider/imagenet200_test_cider.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/cider/imagenet200_test_cider.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_cider_net_cider_e10_lr0.01_protom0.95_default \ + --postprocessor cider \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_cider_net_cider_e10_lr0.01_protom0.95_default \ + --postprocessor cider \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/cider/imagenet200_train_cider.sh b/OpenOOD/scripts/ood/cider/imagenet200_train_cider.sh new file mode 100644 index 0000000000000000000000000000000000000000..37725f73d3a732aec503ec0b3d702f6b6ba226c8 --- /dev/null +++ b/OpenOOD/scripts/ood/cider/imagenet200_train_cider.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ood/cider/imagenet200_train_cider.sh + +SEED=0 +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/cider_net.yml \ + configs/pipelines/train/train_cider.yml \ + configs/preprocessors/base_preprocessor.yml \ + --preprocessor.name cider \ + --network.backbone.name resnet18_224x224 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt \ + --optimizer.lr 0.01 \ + --optimizer.num_epochs 10 \ + --dataset.train.batch_size 512 \ + --trainer.trainer_args.proto_m 0.95 \ + --num_gpus 1 --num_workers 16 \ + --merge_option merge \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/cider/imagenet_test_cider.sh b/OpenOOD/scripts/ood/cider/imagenet_test_cider.sh new file mode 100644 index 0000000000000000000000000000000000000000..0eef59c6c2562b99bf765fd81b693997ecd81b77 --- /dev/null +++ b/OpenOOD/scripts/ood/cider/imagenet_test_cider.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/ood/cider/imagenet_test_cider.sh + +############################################ +# we recommend using the +# new unified, 
easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_cider_net_cider_e10_lr0.001_protom0.95_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor cider \ + --save-score --save-csv #--fsood + + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_cider_net_cider_e10_lr0.001_protom0.95_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor cider \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/cider/imagenet_train_cider.sh b/OpenOOD/scripts/ood/cider/imagenet_train_cider.sh new file mode 100644 index 0000000000000000000000000000000000000000..6f8f7f287b5e686882f837e962fb64eba1c1e2cc --- /dev/null +++ b/OpenOOD/scripts/ood/cider/imagenet_train_cider.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ood/cider/imagenet_train_cider.sh + +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/cider_net.yml \ + configs/pipelines/train/train_cider.yml \ + configs/preprocessors/base_preprocessor.yml \ + --preprocessor.name cider \ + --network.backbone.name resnet50 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 10 \ + --dataset.train.batch_size 512 \ + --trainer.trainer_args.proto_m 0.95 \ + --num_gpus 1 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/conf_branch/cifar100_test_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/cifar100_test_conf_branch.sh new file mode 100644 index 0000000000000000000000000000000000000000..653703dbcf7ae7decf15dbf53d916c4a998f4de5 --- /dev/null +++ b/OpenOOD/scripts/ood/conf_branch/cifar100_test_conf_branch.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# sh scripts/ood/conf_branch/cifar100_test_conf_branch.sh + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/conf_branch.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/conf_branch.yml \ + --network.backbone.name resnet18_32x32 \ + --network.backbone.pretrained False \ + --network.pretrained True \ + --network.checkpoint 'results/cifar100_conf_branch_net_conf_branch_e100_lr0.1/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_conf_branch_net_conf_branch_e100_lr0.1_default \ + --postprocessor conf_branch \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/conf_branch/cifar100_train_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/cifar100_train_conf_branch.sh new file mode 100644 index 0000000000000000000000000000000000000000..4a503c55175b42763b7e96184e6224d55a21f210 --- /dev/null +++ b/OpenOOD/scripts/ood/conf_branch/cifar100_train_conf_branch.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# sh scripts/ood/conf_branch/cifar100_train_conf_branch.sh + +PYTHONPATH='.':$PYTHONPATH \ +#srun 
-p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
+#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
+#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
+
+python main.py \
+    --config configs/datasets/cifar100/cifar100.yml \
+    configs/networks/conf_branch.yml \
+    configs/pipelines/train/train_conf_branch.yml \
+    configs/preprocessors/base_preprocessor.yml \
+    --network.backbone.name resnet18_32x32 \
+    --seed 0
diff --git a/OpenOOD/scripts/ood/conf_branch/cifar10_test_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/cifar10_test_conf_branch.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d6946093851cba0b99a0611b603960c612521fbc
--- /dev/null
+++ b/OpenOOD/scripts/ood/conf_branch/cifar10_test_conf_branch.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# sh scripts/ood/conf_branch/cifar10_test_conf_branch.sh
+
+PYTHONPATH='.':$PYTHONPATH \
+#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
+#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
+#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
+
+python main.py \
+    --config configs/datasets/cifar10/cifar10.yml \
+    configs/datasets/cifar10/cifar10_ood.yml \
+    configs/networks/conf_branch.yml \
+    configs/pipelines/test/test_ood.yml \
+    configs/preprocessors/base_preprocessor.yml \
+    configs/postprocessors/conf_branch.yml \
+    --network.backbone.name resnet18_32x32 \
+    --network.backbone.pretrained False \
+    --network.pretrained True \
+    --network.checkpoint 'results/cifar10_conf_branch_net_conf_branch_e100_lr0.1/s0/best.ckpt' \
+    --mark epoch_100
+
+############################################
+# alternatively, we recommend using the
+# new unified, easy-to-use evaluator with
+# the example script scripts/eval_ood.py
+# especially if you want to get results from
+# multiple runs
+python scripts/eval_ood.py \
+    --id-data cifar10 \
+    --root ./results/cifar10_conf_branch_net_conf_branch_e100_lr0.1_default \
+    --postprocessor conf_branch \
+    --save-score --save-csv
diff --git a/OpenOOD/scripts/ood/conf_branch/cifar10_train_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/cifar10_train_conf_branch.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c0b73e41dd1efc1af989a9d84fbe1218d06f026c
--- /dev/null
+++ b/OpenOOD/scripts/ood/conf_branch/cifar10_train_conf_branch.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# sh scripts/ood/conf_branch/cifar10_train_conf_branch.sh
+
+PYTHONPATH='.':$PYTHONPATH \
+#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
+#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
+#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
+
+python main.py \
+    --config configs/datasets/cifar10/cifar10.yml \
+    configs/networks/conf_branch.yml \
+    configs/pipelines/train/train_conf_branch.yml \
+    configs/preprocessors/base_preprocessor.yml \
+    --network.backbone.name resnet18_32x32 \
+    --seed 0
diff --git a/OpenOOD/scripts/ood/conf_branch/imagenet200_test_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/imagenet200_test_conf_branch.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d8583eaac48dec23e724681cdf4c98c6ded6e70d
--- /dev/null
+++ b/OpenOOD/scripts/ood/conf_branch/imagenet200_test_conf_branch.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# sh scripts/ood/conf_branch/imagenet200_test_conf_branch.sh
+
+############################################
+# we recommend using the
+# new unified, easy-to-use evaluator with
+# the example script scripts/eval_ood.py
+# especially if you want to get results from
+# multiple runs
+
+# ood
+python
scripts/eval_ood.py \
+    --id-data imagenet200 \
+    --root ./results/imagenet200_conf_branch_net_conf_branch_e90_lr0.1_default \
+    --postprocessor conf_branch \
+    --save-score --save-csv #--fsood
+
+# full-spectrum ood
+python scripts/eval_ood.py \
+    --id-data imagenet200 \
+    --root ./results/imagenet200_conf_branch_net_conf_branch_e90_lr0.1_default \
+    --postprocessor conf_branch \
+    --save-score --save-csv --fsood
diff --git a/OpenOOD/scripts/ood/conf_branch/imagenet200_train_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/imagenet200_train_conf_branch.sh
new file mode 100644
index 0000000000000000000000000000000000000000..973a8fee647c771b563ec05ba42f72a398693d99
--- /dev/null
+++ b/OpenOOD/scripts/ood/conf_branch/imagenet200_train_conf_branch.sh
@@ -0,0 +1,11 @@
+python main.py \
+    --config configs/datasets/imagenet200/imagenet200.yml \
+    configs/networks/conf_branch.yml \
+    configs/pipelines/train/train_conf_branch.yml \
+    configs/preprocessors/base_preprocessor.yml \
+    --network.backbone.name resnet18_224x224 \
+    --optimizer.num_epochs 90 \
+    --dataset.train.batch_size 128 \
+    --num_gpus 2 --num_workers 16 \
+    --merge_option merge \
+    --seed 0
diff --git a/OpenOOD/scripts/ood/conf_branch/imagenet_test_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/imagenet_test_conf_branch.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b00e0c63ffe8b6b5af8abbcd9b77e5a7a705f69c
--- /dev/null
+++ b/OpenOOD/scripts/ood/conf_branch/imagenet_test_conf_branch.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# sh scripts/ood/conf_branch/imagenet_test_conf_branch.sh
+
+############################################
+# we recommend using the
+# new unified, easy-to-use evaluator with
+# the example script scripts/eval_ood_imagenet.py
+
+# ood
+python scripts/eval_ood_imagenet.py \
+    --ckpt-path ./results/imagenet_conf_branch_net_conf_branch_e30_lr0.001_default/s0/best.ckpt \
+    --arch resnet50 \
+    --postprocessor conf_branch \
+    --save-score --save-csv #--fsood
+
+# full-spectrum ood
+python scripts/eval_ood_imagenet.py \
+    --ckpt-path ./results/imagenet_conf_branch_net_conf_branch_e30_lr0.001_default/s0/best.ckpt \
+    --arch resnet50 \
+    --postprocessor conf_branch \
+    --save-score --save-csv --fsood
diff --git a/OpenOOD/scripts/ood/conf_branch/imagenet_train_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/imagenet_train_conf_branch.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4eb61d1228969b5988466283f4a3d166ad1e0e2d
--- /dev/null
+++ b/OpenOOD/scripts/ood/conf_branch/imagenet_train_conf_branch.sh
@@ -0,0 +1,14 @@
+python main.py \
+    --config configs/datasets/imagenet/imagenet.yml \
+    configs/networks/conf_branch.yml \
+    configs/pipelines/train/train_conf_branch.yml \
+    configs/preprocessors/base_preprocessor.yml \
+    --network.backbone.name resnet50 \
+    --network.backbone.pretrained True \
+    --network.backbone.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \
+    --optimizer.lr 0.001 \
+    --optimizer.num_epochs 30 \
+    --dataset.train.batch_size 128 \
+    --num_gpus 2 --num_workers 16 \
+    --merge_option merge \
+    --seed 0
diff --git a/OpenOOD/scripts/ood/conf_branch/train_conf_branch.sh b/OpenOOD/scripts/ood/conf_branch/train_conf_branch.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1c56ab29546c8bf70d8e70235ca0bfbb7b3d6fd0
--- /dev/null
+++ b/OpenOOD/scripts/ood/conf_branch/train_conf_branch.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# sh scripts/ood/conf_branch/train_conf_branch.sh
+
+PYTHONPATH='.':$PYTHONPATH \
+srun -p dsta --mpi=pmi2 --gres=gpu:${GPU}
-n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/digits/mnist.yml \ +configs/pipelines/train/train_conf_esti.yml \ +configs/networks/conf_net.yml diff --git a/OpenOOD/scripts/ood/csi/cifar100_test_ood_csi.sh b/OpenOOD/scripts/ood/csi/cifar100_test_ood_csi.sh new file mode 100644 index 0000000000000000000000000000000000000000..226cb0c0779412bc309dc4b6ca76aaff278e1d8b --- /dev/null +++ b/OpenOOD/scripts/ood/csi/cifar100_test_ood_csi.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/csi/cifar100_test_ood_csi.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/csi_net.yml \ + configs/pipelines/test/test_ood.yml \ + configs/postprocessors/msp.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint 'results/cifar100_csi_net_csi_step2_e100_lr0.1/s0/best.ckpt' \ + --merge_option merge + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_csi_net_csi_step2_e100_lr0.1 \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/csi/cifar100_train_csi_step1.sh b/OpenOOD/scripts/ood/csi/cifar100_train_csi_step1.sh new file mode 100644 index 0000000000000000000000000000000000000000..9141e4a421802345a5a3106d7d51110dc141a37d --- /dev/null +++ b/OpenOOD/scripts/ood/csi/cifar100_train_csi_step1.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ood/csi/cifar100_train_csi_step1.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/csi_net.yml \ + configs/pipelines/train/train_csi.yml \ + configs/preprocessors/csi_preprocessor.yml \ + --network.pretrained False \ + --optimizer.num_epochs 100 \ + --dataset.train.batch_size 64 \ + --merge_option merge \ + --mode csi_step1 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/csi/cifar100_train_csi_step2.sh b/OpenOOD/scripts/ood/csi/cifar100_train_csi_step2.sh new file mode 100644 index 0000000000000000000000000000000000000000..fe52140dcdb1ec66c0b1293aa65cdae1da756329 --- /dev/null +++ b/OpenOOD/scripts/ood/csi/cifar100_train_csi_step2.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/ood/csi/cifar100_train_csi_step2.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +SEED=0 +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/csi_net.yml \ + configs/pipelines/train/train_csi.yml \ + configs/preprocessors/base_preprocessor.yml 
\ + --network.pretrained True \ + --network.checkpoint ./results/cifar100_csi_net_csi_step1_e100_lr0.1/s${SEED}/best.ckpt \ + --optimizer.num_epochs 100 \ + --dataset.train.batch_size 128 \ + --mode csi_step2 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/csi/cifar10_test_ood_csi.sh b/OpenOOD/scripts/ood/csi/cifar10_test_ood_csi.sh new file mode 100644 index 0000000000000000000000000000000000000000..0a7d8549743478f7c3ff5bed3e3760d8bd16d749 --- /dev/null +++ b/OpenOOD/scripts/ood/csi/cifar10_test_ood_csi.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# sh scripts/ood/csi/cifar10_test_ood_csi.sh + +GPU=1 +CPU=1 +node=36 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/csi_net.yml \ + configs/pipelines/test/test_ood.yml \ + configs/postprocessors/msp.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint 'results/cifar10_csi_net_csi_step2_e100_lr0.1/s0/best.ckpt' \ + --merge_option merge + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_csi_net_csi_step2_e100_lr0.1 \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/csi/cifar10_train_csi_step1.sh b/OpenOOD/scripts/ood/csi/cifar10_train_csi_step1.sh new file mode 100644 index 0000000000000000000000000000000000000000..7d7de4db2716636e11658b656a62f8b75b72b5cf --- /dev/null +++ b/OpenOOD/scripts/ood/csi/cifar10_train_csi_step1.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ood/csi/cifar10_train_csi_step1.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/csi_net.yml \ + configs/pipelines/train/train_csi.yml \ + configs/preprocessors/csi_preprocessor.yml \ + --network.pretrained False \ + --optimizer.num_epochs 100 \ + --dataset.train.batch_size 64 \ + --merge_option merge \ + --mode csi_step1 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/csi/cifar10_train_csi_step2.sh b/OpenOOD/scripts/ood/csi/cifar10_train_csi_step2.sh new file mode 100644 index 0000000000000000000000000000000000000000..9cf9afd44fcdb280590c66462d289edb3d7de3be --- /dev/null +++ b/OpenOOD/scripts/ood/csi/cifar10_train_csi_step2.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/ood/csi/cifar10_train_csi_step2.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +SEED=0 +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/csi_net.yml \ + configs/pipelines/train/train_csi.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint 
./results/cifar10_csi_net_csi_step1_e100_lr0.1/s${SEED}/best.ckpt \ + --optimizer.num_epochs 100 \ + --dataset.train.batch_size 128 \ + --mode csi_step2 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/dice/cifar100_test_ood_dice.sh b/OpenOOD/scripts/ood/dice/cifar100_test_ood_dice.sh new file mode 100644 index 0000000000000000000000000000000000000000..b71318cda072b3933b93e9db2774e1729913fbb9 --- /dev/null +++ b/OpenOOD/scripts/ood/dice/cifar100_test_ood_dice.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/dice/cifar100_test_ood_dice.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/dice.yml \ + --num_workers 8 \ + --network.checkpoint './results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor dice \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/dice/cifar10_test_ood_dice.sh b/OpenOOD/scripts/ood/dice/cifar10_test_ood_dice.sh new file mode 100644 index 0000000000000000000000000000000000000000..ac7f008b7cdd079479fb581a3031f7791343fc62 --- /dev/null +++ b/OpenOOD/scripts/ood/dice/cifar10_test_ood_dice.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/dice/cifar10_test_ood_dice.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/dice.yml \ + --num_workers 8 \ + --network.checkpoint './results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor dice \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/dice/imagenet200_test_ood_dice.sh b/OpenOOD/scripts/ood/dice/imagenet200_test_ood_dice.sh new file mode 100644 index 0000000000000000000000000000000000000000..7e7a85c06797a81799b724c0df4335ccf66478da --- /dev/null +++ b/OpenOOD/scripts/ood/dice/imagenet200_test_ood_dice.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/dice/imagenet200_test_ood_dice.sh + +############################################ +# alternatively, we recommend using the 
+# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor dice \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor dice \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/dice/imagenet_test_ood_dice.sh b/OpenOOD/scripts/ood/dice/imagenet_test_ood_dice.sh new file mode 100644 index 0000000000000000000000000000000000000000..690a0e1ff9279eb6d3a03c64e61efe23b07be45b --- /dev/null +++ b/OpenOOD/scripts/ood/dice/imagenet_test_ood_dice.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/dice/imagenet_test_ood_dice.sh + +GPU=1 +CPU=1 +node=35 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/dice.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor dice \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor dice \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/dice/mnist_test_ood_dice.sh b/OpenOOD/scripts/ood/dice/mnist_test_ood_dice.sh new file mode 100644 index 0000000000000000000000000000000000000000..5eb2c7ef1005202cb797ac4f907f8cf440d8f7e0 --- /dev/null +++ b/OpenOOD/scripts/ood/dice/mnist_test_ood_dice.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/dice/mnist_test_ood_dice.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dice.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/dice/mnist_test_osr_dice.sh b/OpenOOD/scripts/ood/dice/mnist_test_osr_dice.sh new file mode 100644 index 0000000000000000000000000000000000000000..ebbe5710f0bb72eb0ab0faf41cb743120c61d2b3 --- /dev/null +++ b/OpenOOD/scripts/ood/dice/mnist_test_osr_dice.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh 
scripts/ood/dice/mnist_test_osr_dice.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dice.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/dice/sweep_osr.py b/OpenOOD/scripts/ood/dice/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e9f4c60e11275e81133fc963d62dfb969455b7 --- /dev/null +++ b/OpenOOD/scripts/ood/dice/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/dice/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/dice.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/ebo/cifar100_test_ood_ebo.sh b/OpenOOD/scripts/ood/ebo/cifar100_test_ood_ebo.sh new file mode 100644 index 0000000000000000000000000000000000000000..7bb1e271fe9cabede06115e3348cdccc0c13c7ca --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/cifar100_test_ood_ebo.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# sh scripts/ood/ebo/cifar100_test_ood_ebo.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + 
--postprocessor ebo \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/ebo/cifar100_train_ood_ebo.sh b/OpenOOD/scripts/ood/ebo/cifar100_train_ood_ebo.sh new file mode 100644 index 0000000000000000000000000000000000000000..51457797f465542259b53a735328f899aab8fbff --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/cifar100_train_ood_ebo.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ood/ebo/cifar100_train_ood_ebo.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/train/baseline.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ebo.yml diff --git a/OpenOOD/scripts/ood/ebo/cifar10_test_ood_ebo.sh b/OpenOOD/scripts/ood/ebo/cifar10_test_ood_ebo.sh new file mode 100644 index 0000000000000000000000000000000000000000..106364aeef962e64e47c506a19f9c5c11559eddd --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/cifar10_test_ood_ebo.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# sh scripts/ood/ebo/cifar10_test_ood_ebo.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 1 \ + --postprocessor.postprocessor_args.temperature 1 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor ebo \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/ebo/imagenet200_test_ood_ebo.sh b/OpenOOD/scripts/ood/ebo/imagenet200_test_ood_ebo.sh new file mode 100644 index 0000000000000000000000000000000000000000..a9b04f9620a4f6bd94b643cf0b11697cbb220344 --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/imagenet200_test_ood_ebo.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/ebo/imagenet200_test_ood_ebo.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor ebo \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor ebo \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/ebo/imagenet_test_ood_ebo.sh 
b/OpenOOD/scripts/ood/ebo/imagenet_test_ood_ebo.sh new file mode 100644 index 0000000000000000000000000000000000000000..97bbef7591623b53e17bfde01e14749398897ee0 --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/imagenet_test_ood_ebo.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/ebo/imagenet_test_ood_ebo.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor ebo \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor ebo \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/ebo/mnist_test_ood_ebo.sh b/OpenOOD/scripts/ood/ebo/mnist_test_ood_ebo.sh new file mode 100644 index 0000000000000000000000000000000000000000..41491bc5e83e6361ac592af04b2a11b068662e6d --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/mnist_test_ood_ebo.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/ebo/mnist_test_ood_ebo.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ebo.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc98.50.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/ebo/mnist_test_ood_ebo_aps.sh b/OpenOOD/scripts/ood/ebo/mnist_test_ood_ebo_aps.sh new file mode 100644 index 0000000000000000000000000000000000000000..2c61757355ac6d1abb7f42d33134e871337f8527 --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/mnist_test_ood_ebo_aps.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/ebo/mnist_test_ood_ebo_aps.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood_aps.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ebo.yml \ +--num_workers 8 \ +--network.checkpoint 
'results/checkpoints/mnist_lenet_acc98.50.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/ebo/mnist_test_osr_ebo.sh b/OpenOOD/scripts/ood/ebo/mnist_test_osr_ebo.sh new file mode 100644 index 0000000000000000000000000000000000000000..591c3ea7e3578f2a9d449122fe39ce60e5932dbb --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/mnist_test_osr_ebo.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/ebo/mnist_test_osr_ebo.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ebo.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/ebo/sweep_osr.py b/OpenOOD/scripts/ood/ebo/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..6317cb5e56fc93402ab2959157825b13c19ffe71 --- /dev/null +++ b/OpenOOD/scripts/ood/ebo/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/ebo/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/gen/cifar100_test_ood_gen.sh b/OpenOOD/scripts/ood/gen/cifar100_test_ood_gen.sh new file mode 100644 index 0000000000000000000000000000000000000000..d0b3c03ad0020c8081b5906ccb5b8ec5ef9af253 --- /dev/null +++ b/OpenOOD/scripts/ood/gen/cifar100_test_ood_gen.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# sh scripts/ood/gen/cifar100_test_ood_gen.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gen.yml \ + --network.checkpoint
'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor gen \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/gen/cifar10_test_ood_gen.sh b/OpenOOD/scripts/ood/gen/cifar10_test_ood_gen.sh new file mode 100644 index 0000000000000000000000000000000000000000..3f2528689a9f10480d17d2d5fc679238fa0ac3b7 --- /dev/null +++ b/OpenOOD/scripts/ood/gen/cifar10_test_ood_gen.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/gen/cifar10_test_ood_gen.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gen.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 1 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor gen \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/gen/imagenet200_test_ood_gen.sh b/OpenOOD/scripts/ood/gen/imagenet200_test_ood_gen.sh new file mode 100644 index 0000000000000000000000000000000000000000..889bb0bb92700ac1b1ef30a4fb2efdf7fa26a4d9 --- /dev/null +++ b/OpenOOD/scripts/ood/gen/imagenet200_test_ood_gen.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/gen/imagenet200_test_ood_gen.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor gen \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor gen \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/gen/imagenet_test_ood_gen.sh b/OpenOOD/scripts/ood/gen/imagenet_test_ood_gen.sh new file mode 100644 index 0000000000000000000000000000000000000000..4e68bd30f315fbb8a7d995d5c9df60bfa0f517e9 --- /dev/null +++ b/OpenOOD/scripts/ood/gen/imagenet_test_ood_gen.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/gen/imagenet_test_ood_gen.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w
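############################################
# Note on the method: GEN (Generalized ENtropy, Liu et al. 2023) is fully
# post-hoc: it scores a sample by the generalized entropy of its top-M
# softmax probabilities, so the commands here only need an off-the-shelf
# trained classifier. A minimal sketch (illustrative; gamma and M follow
# the paper's defaults, not necessarily configs/postprocessors/gen.yml):
#
#     import torch
#
#     def gen_score(logits: torch.Tensor, gamma: float = 0.1, m: int = 100):
#         probs = torch.softmax(logits, dim=-1)
#         top_m = torch.topk(probs, min(m, probs.shape[-1]), dim=-1).values
#         # lower generalized entropy => more in-distribution
#         return -(top_m ** gamma * (1.0 - top_m) ** gamma).sum(dim=-1)
############################################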
SG-IDC1-10-51-2-${node} \ +# python main.py \ +# --config configs/datasets/imagenet/imagenet.yml \ +# configs/datasets/imagenet/imagenet_ood.yml \ +# configs/networks/resnet50.yml \ +# configs/pipelines/test/test_ood.yml \ +# configs/preprocessors/base_preprocessor.yml \ +# configs/postprocessors/gen.yml \ +# --num_workers 4 \ +# --ood_dataset.image_size 256 \ +# --dataset.test.batch_size 256 \ +# --dataset.val.batch_size 256 \ +# --network.pretrained True \ +# --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ +# --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor gen \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor gen \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/godin/cifar100_test_ood_godin.sh b/OpenOOD/scripts/ood/godin/cifar100_test_ood_godin.sh new file mode 100644 index 0000000000000000000000000000000000000000..9b8fd65f3ea78669c1bfc25d4b6a60e4f57cdbae --- /dev/null +++ b/OpenOOD/scripts/ood/godin/cifar100_test_ood_godin.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# sh scripts/ood/godin/cifar100_test_ood_godin.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/godin_net.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/godin.yml \ + --network.backbone.name resnet18_32x32 \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_godin_net_godin_e100_lr0.1_default/s0/best.ckpt' \ + --merge_option merge + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_godin_net_godin_e100_lr0.1_default \ + --postprocessor godin \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/godin/cifar100_train_godin.sh b/OpenOOD/scripts/ood/godin/cifar100_train_godin.sh new file mode 100644 index 0000000000000000000000000000000000000000..70887119ffd1f6605bc520b0c340f2e412dae0c3 --- /dev/null +++ b/OpenOOD/scripts/ood/godin/cifar100_train_godin.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ood/godin/cifar100_train_godin.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/godin_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/godin.yml \ + --network.backbone.name resnet18_32x32 \ + --num_workers 8 \ + 
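############################################
# Note on the method: GODIN (Generalized ODIN, Hsu et al. 2020) is a
# training-time method, hence the dedicated godin_net.yml and the
# --trainer.name godin flag just below: the usual linear head is replaced
# by a decomposed-confidence head logits(x) = h(x) / g(x), where g(x) is a
# learned sigmoid-gated scale, and h (or g) serves as the OOD score at
# test time without tuning on any OOD data. A rough sketch of the head
# (illustrative names, not OpenOOD's exact GodinNet):
#
#     import torch.nn as nn
#
#     class DecomposedHead(nn.Module):
#         def __init__(self, feat_dim: int, num_classes: int):
#             super().__init__()
#             self.h = nn.Linear(feat_dim, num_classes)  # class evidence
#             self.g = nn.Sequential(nn.Linear(feat_dim, 1),
#                                    nn.BatchNorm1d(1), nn.Sigmoid())
#
#         def forward(self, feat):
#             h, g = self.h(feat), self.g(feat)
#             return h / g, h, g  # train h/g with CE; score with h (or g)
############################################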
--trainer.name godin \ + --optimizer.num_epochs 100 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/godin/cifar10_test_ood_godin.sh b/OpenOOD/scripts/ood/godin/cifar10_test_ood_godin.sh new file mode 100644 index 0000000000000000000000000000000000000000..ecee491b567f1e17c8fff53b14fdc8de512a6f6d --- /dev/null +++ b/OpenOOD/scripts/ood/godin/cifar10_test_ood_godin.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# sh scripts/ood/godin/cifar10_test_ood_godin.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/godin_net.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/godin.yml \ + --network.backbone.name resnet18_32x32 \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_godin_net_godin_e100_lr0.1_default/s0/best.ckpt' \ + --mark epoch_100 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_godin_net_godin_e100_lr0.1_default \ + --postprocessor godin \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/godin/cifar10_train_godin.sh b/OpenOOD/scripts/ood/godin/cifar10_train_godin.sh new file mode 100644 index 0000000000000000000000000000000000000000..37237c583e7e892df5a5c76bdfd8edefdcb431da --- /dev/null +++ b/OpenOOD/scripts/ood/godin/cifar10_train_godin.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ood/godin/cifar10_train_godin.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/godin_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/godin.yml \ + --network.backbone.name resnet18_32x32 \ + --num_workers 8 \ + --trainer.name godin \ + --optimizer.num_epochs 100 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/godin/imagenet200_test_ood_godin.sh b/OpenOOD/scripts/ood/godin/imagenet200_test_ood_godin.sh new file mode 100644 index 0000000000000000000000000000000000000000..eba18ee6d4626b024b26dba7844e6371d4c7d97b --- /dev/null +++ b/OpenOOD/scripts/ood/godin/imagenet200_test_ood_godin.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/godin/imagenet200_test_ood_godin.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_godin_net_godin_e90_lr0.1_default \ + --postprocessor godin \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root 
./results/imagenet200_godin_net_godin_e90_lr0.1_default \ + --postprocessor godin \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/godin/imagenet200_train_godin.sh b/OpenOOD/scripts/ood/godin/imagenet200_train_godin.sh new file mode 100644 index 0000000000000000000000000000000000000000..155958ad60636afbceb393c6650158d931157a97 --- /dev/null +++ b/OpenOOD/scripts/ood/godin/imagenet200_train_godin.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# sh scripts/ood/godin/imagenet200_train_godin.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/godin_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/godin.yml \ + --network.backbone.name resnet18_224x224 \ + --trainer.name godin \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/godin/imagenet_test_ood_godin.sh b/OpenOOD/scripts/ood/godin/imagenet_test_ood_godin.sh new file mode 100644 index 0000000000000000000000000000000000000000..1489dbd3433469809d76c547e1756eec0ee310f0 --- /dev/null +++ b/OpenOOD/scripts/ood/godin/imagenet_test_ood_godin.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/godin/imagenet_test_ood_godin.sh + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_godin_net_godin_e30_lr0.001_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor godin \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_godin_net_godin_e30_lr0.001_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor godin \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/godin/imagenet_train_godin.sh b/OpenOOD/scripts/ood/godin/imagenet_train_godin.sh new file mode 100644 index 0000000000000000000000000000000000000000..95e055e1f8cb8fa3858e8f3b101966b83deab6c2 --- /dev/null +++ b/OpenOOD/scripts/ood/godin/imagenet_train_godin.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ood/godin/imagenet_train_godin.sh + +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/godin_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/godin.yml \ + --network.backbone.name resnet50 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --trainer.name godin \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/gradnorm/cifar100_test_ood_gradnorm.sh b/OpenOOD/scripts/ood/gradnorm/cifar100_test_ood_gradnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..3213d44225c95f1eb7821df486c7a9a919abf444 --- /dev/null +++ b/OpenOOD/scripts/ood/gradnorm/cifar100_test_ood_gradnorm.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/gradnorm/cifar100_test_ood_gradnorm.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# 
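############################################
# Note on the method: GradNorm (Huang et al. 2021) scores a sample by the
# L1 norm of the gradient, taken w.r.t. the final linear layer, of the KL
# divergence between a uniform target and the softmax prediction; ID
# inputs tend to produce larger gradients than OOD inputs. A minimal
# per-sample sketch (illustrative fc/feature names, not the exact OpenOOD
# postprocessor):
#
#     import torch
#     import torch.nn.functional as F
#
#     def gradnorm_score(fc: torch.nn.Linear, feat: torch.Tensor) -> float:
#         fc.zero_grad()
#         logits = fc(feat)                       # feat: (1, feat_dim)
#         uniform = torch.ones_like(logits) / logits.shape[-1]
#         loss = torch.sum(-uniform * F.log_softmax(logits, dim=-1))
#         loss.backward()
#         # larger L1 gradient norm => more in-distribution
#         return fc.weight.grad.abs().sum().item()
############################################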
--kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gradnorm.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor gradnorm \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/gradnorm/cifar10_test_ood_gradnorm.sh b/OpenOOD/scripts/ood/gradnorm/cifar10_test_ood_gradnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..62ae8a232ef1b86c377a26d1f88a584d330287cd --- /dev/null +++ b/OpenOOD/scripts/ood/gradnorm/cifar10_test_ood_gradnorm.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/gradnorm/cifar10_test_ood_gradnorm.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gradnorm.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor gradnorm \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/gradnorm/imagenet200_test_ood_gradnorm.sh b/OpenOOD/scripts/ood/gradnorm/imagenet200_test_ood_gradnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..7861eee21e7878a1f42315be728b94e48b07bd7b --- /dev/null +++ b/OpenOOD/scripts/ood/gradnorm/imagenet200_test_ood_gradnorm.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/gradnorm/imagenet200_test_ood_gradnorm.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor gradnorm \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor gradnorm \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/gradnorm/imagenet_test_ood_gradnorm.sh b/OpenOOD/scripts/ood/gradnorm/imagenet_test_ood_gradnorm.sh new
file mode 100644 index 0000000000000000000000000000000000000000..46fab5eb40668b3d40dc1f4dce1c49e57c8ffb77 --- /dev/null +++ b/OpenOOD/scripts/ood/gradnorm/imagenet_test_ood_gradnorm.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# sh scripts/ood/gradnorm/imagenet_test_ood_gradnorm.sh + +GPU=1 +CPU=1 +node=39 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gradnorm.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor gradnorm \ + --save-score --save-csv #--fsood + + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor gradnorm \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/gradnorm/mnist_test_ood_gradnorm.sh b/OpenOOD/scripts/ood/gradnorm/mnist_test_ood_gradnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..c83f88292c2df54a07aa23044698cac69a659a6e --- /dev/null +++ b/OpenOOD/scripts/ood/gradnorm/mnist_test_ood_gradnorm.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/gradnorm/mnist_test_ood_gradnorm.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/gradnorm.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/gradnorm/mnist_test_osr_gradnorm.sh b/OpenOOD/scripts/ood/gradnorm/mnist_test_osr_gradnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..a19369371dc5581e774a1e22f3a78a70d6146801 --- /dev/null +++ b/OpenOOD/scripts/ood/gradnorm/mnist_test_osr_gradnorm.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/gradnorm/mnist_test_osr_gradnorm.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/gradnorm.yml \ 
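############################################
# Note on the sweep_osr.py helpers that accompany each method (one appears
# just below): every entry of `config` pairs an OSR dataset split with its
# matching backbone and checkpoint, the f-string assembles one srun command
# per entry, and the trailing "&" backgrounds each job so the four OSR runs
# launch in parallel; --merge_option merge is passed so the parallel runs
# merge their results into a shared results file instead of overwriting it.
############################################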
+--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/gradnorm/sweep_osr.py b/OpenOOD/scripts/ood/gradnorm/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..7723aedaf2372f28fd66f3f874427691f94ff719 --- /dev/null +++ b/OpenOOD/scripts/ood/gradnorm/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/gradnorm/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gradnorm.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/gram/cifar100_test_ood_gram.sh b/OpenOOD/scripts/ood/gram/cifar100_test_ood_gram.sh new file mode 100644 index 0000000000000000000000000000000000000000..c416f8a3dbd2f9fe2fe8b49fcef0a8cbbbd30ef7 --- /dev/null +++ b/OpenOOD/scripts/ood/gram/cifar100_test_ood_gram.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/gram/cifar100_test_ood_gram.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gram.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor gram \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/gram/cifar10_test_ood_gram.sh b/OpenOOD/scripts/ood/gram/cifar10_test_ood_gram.sh new file mode 100644 index 0000000000000000000000000000000000000000..c966090ce2a61a6f8669bc98935910838b310b53 --- /dev/null +++ b/OpenOOD/scripts/ood/gram/cifar10_test_ood_gram.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/gram/cifar10_test_ood_gram.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta 
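############################################
# Note on the method: the Gram postprocessor (Sastry & Oza 2020) detects
# OOD inputs from higher-order Gram matrices of intermediate feature maps,
# flagging entries that fall outside per-class min/max ranges recorded on
# training data. A rough sketch of the layer-wise deviation (illustrative,
# not the exact OpenOOD implementation):
#
#     import torch
#
#     def gram_deviation(feat, mins, maxs, p: int = 1):
#         # feat: (C, H*W) flattened channel activations of one layer;
#         # mins/maxs: (C, C) order-p Gram ranges seen on training data
#         # for the predicted class
#         fp = feat.pow(p)
#         gram = fp @ fp.t()
#         gram = gram.sign() * gram.abs().pow(1.0 / p)
#         dev = (torch.relu(mins - gram) / (mins.abs() + 1e-6)).sum()
#         dev += (torch.relu(gram - maxs) / (maxs.abs() + 1e-6)).sum()
#         return dev  # larger deviation => more likely OOD
############################################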
--mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gram.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor gram \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/gram/imagenet200_test_ood_gram.sh b/OpenOOD/scripts/ood/gram/imagenet200_test_ood_gram.sh new file mode 100644 index 0000000000000000000000000000000000000000..e0b918141bc986a8ae721ebd7055b6031081e9e0 --- /dev/null +++ b/OpenOOD/scripts/ood/gram/imagenet200_test_ood_gram.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/gram/imagenet200_test_ood_gram.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor gram \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor gram \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/gram/imagenet_test_ood_gram.sh b/OpenOOD/scripts/ood/gram/imagenet_test_ood_gram.sh new file mode 100644 index 0000000000000000000000000000000000000000..6a4de90db3cc99bb3ece7694c9f67b9d663a8fdc --- /dev/null +++ b/OpenOOD/scripts/ood/gram/imagenet_test_ood_gram.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/gram/imagenet_test_ood_gram.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gram.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor gram \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python
scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor gram \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/gram/mnist_test_osr_gram.sh b/OpenOOD/scripts/ood/gram/mnist_test_osr_gram.sh new file mode 100644 index 0000000000000000000000000000000000000000..827b52c4d0b6addd418b5044640281930197390b --- /dev/null +++ b/OpenOOD/scripts/ood/gram/mnist_test_osr_gram.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/gram/mnist_test_osr_gram.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/gram.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/gram/sweep_osr.py b/OpenOOD/scripts/ood/gram/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..5f9eacb8408cffa8be5603333139f6aa6f09db94 --- /dev/null +++ b/OpenOOD/scripts/ood/gram/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/gram/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gram.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/ish/imagenet_train_ish.sh b/OpenOOD/scripts/ood/ish/imagenet_train_ish.sh new file mode 100644 index 0000000000000000000000000000000000000000..597cbc219b626ccc36c22a8fa1122bd61e70a3b2 --- /dev/null +++ b/OpenOOD/scripts/ood/ish/imagenet_train_ish.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ood/ish/imagenet_train_ish.sh +# pretrained model: https://drive.google.com/file/d/1EQimcdbJsKdU2uw4-BrqZO6tu4kXKtbG + +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/train/train_ish.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./checkpoints/resnet50-0676ba61.pth \ + --trainer.trainer_args.param 0.85 \ + --optimizer.lr 0.003 \ + --optimizer.weight_decay_fc 0.00005 \ + --optimizer.num_epochs 10 \ + --dataset.train.batch_size 128 \ + --num_gpus 4 
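############################################
# Note: unlike the sibling train scripts, which hard-code --seed 0, this
# one reads ${SEED} (a few lines below) from the environment, so the
# caller must provide it, e.g.:
#
#     SEED=0 sh scripts/ood/ish/imagenet_train_ish.sh
#
# (ISH is a training-time activation-shaping recipe configured by
# configs/pipelines/train/train_ish.yml; --trainer.trainer_args.param,
# set to 0.85 above, appears to control its shaping strength.)
############################################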
--num_workers 4 \ + --merge_option merge \ + --seed ${SEED} + diff --git a/OpenOOD/scripts/ood/kl_matching/cifar100_test_ood_kl_matching.sh b/OpenOOD/scripts/ood/kl_matching/cifar100_test_ood_kl_matching.sh new file mode 100644 index 0000000000000000000000000000000000000000..363abf891e8c2a353c39197a6758cbd1e5d88578 --- /dev/null +++ b/OpenOOD/scripts/ood/kl_matching/cifar100_test_ood_kl_matching.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/kl_matching/cifar100_test_ood_kl_matching.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/klm.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor klm \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/kl_matching/cifar10_test_ood_kl_matching.sh b/OpenOOD/scripts/ood/kl_matching/cifar10_test_ood_kl_matching.sh new file mode 100644 index 0000000000000000000000000000000000000000..9516e403913a3c2e951bfdf020c3d676dc182b86 --- /dev/null +++ b/OpenOOD/scripts/ood/kl_matching/cifar10_test_ood_kl_matching.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/kl_matching/cifar10_test_ood_kl_matching.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/klm.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor klm \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/kl_matching/imagenet200_test_ood_kl_matching.sh b/OpenOOD/scripts/ood/kl_matching/imagenet200_test_ood_kl_matching.sh new file mode 100644 index 0000000000000000000000000000000000000000..5667be12ea3939c8c06138f3b3c477674060a163 --- /dev/null +++ b/OpenOOD/scripts/ood/kl_matching/imagenet200_test_ood_kl_matching.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/kl_matching/imagenet200_test_ood_kl_matching.sh + +############################################ +# alternatively, we
recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor klm \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor klm \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/kl_matching/imagenet_test_ood_kl_matching.sh b/OpenOOD/scripts/ood/kl_matching/imagenet_test_ood_kl_matching.sh new file mode 100644 index 0000000000000000000000000000000000000000..af089d640fcfd2e27157a421549ac94c8708a27f --- /dev/null +++ b/OpenOOD/scripts/ood/kl_matching/imagenet_test_ood_kl_matching.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/kl_matching/imagenet_test_ood_kl_matching.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/klm.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor klm \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor klm \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/kl_matching/mnist_test_ood_kl_matching.sh b/OpenOOD/scripts/ood/kl_matching/mnist_test_ood_kl_matching.sh new file mode 100644 index 0000000000000000000000000000000000000000..1fc17d741e2e15ee327cd6d9fa2e99ca4b17f566 --- /dev/null +++ b/OpenOOD/scripts/ood/kl_matching/mnist_test_ood_kl_matching.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/kl_matching/mnist_test_ood_kl_matching.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/klm.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/kl_matching/mnist_test_osr_kl_matching.sh b/OpenOOD/scripts/ood/kl_matching/mnist_test_osr_kl_matching.sh new file mode 100644 index
0000000000000000000000000000000000000000..cffd87678be61bc9a1f9717420fc90bfb3bcc36e --- /dev/null +++ b/OpenOOD/scripts/ood/kl_matching/mnist_test_osr_kl_matching.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/kl_matching/mnist_test_osr_kl_matching.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/klm.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/kl_matching/sweep_osr.py b/OpenOOD/scripts/ood/kl_matching/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..18b8500493ccf9626e273555749b3bf3b6207e6d --- /dev/null +++ b/OpenOOD/scripts/ood/kl_matching/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/kl_matching/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/klm.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/knn/cifar100_test_ood_knn.sh b/OpenOOD/scripts/ood/knn/cifar100_test_ood_knn.sh new file mode 100644 index 0000000000000000000000000000000000000000..5672c4ee3ec6050f4ad356fa78f6eb835cfa08dc --- /dev/null +++ b/OpenOOD/scripts/ood/knn/cifar100_test_ood_knn.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/knn/cifar100_test_ood_knn.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/knn.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new 
unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor knn \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/knn/cifar10_test_ood_knn.sh b/OpenOOD/scripts/ood/knn/cifar10_test_ood_knn.sh new file mode 100644 index 0000000000000000000000000000000000000000..e5f241b98768f9e8c54da9131a47c073790c3988 --- /dev/null +++ b/OpenOOD/scripts/ood/knn/cifar10_test_ood_knn.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/knn/cifar10_test_ood_knn.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/knn.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor knn \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/knn/imagenet200_test_ood_knn.sh b/OpenOOD/scripts/ood/knn/imagenet200_test_ood_knn.sh new file mode 100644 index 0000000000000000000000000000000000000000..765be80ccbb325c31551092e752b0c4b4ae38efd --- /dev/null +++ b/OpenOOD/scripts/ood/knn/imagenet200_test_ood_knn.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/knn/imagenet200_test_ood_knn.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor knn \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor knn \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/knn/imagenet_test_ood_knn.sh b/OpenOOD/scripts/ood/knn/imagenet_test_ood_knn.sh new file mode 100644 index 0000000000000000000000000000000000000000..058d8879066098d504e5ae4687018e0bb6ea1f4a --- /dev/null +++ b/OpenOOD/scripts/ood/knn/imagenet_test_ood_knn.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/knn/imagenet_test_ood_knn.sh + +GPU=1 +CPU=1 +node=37 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ +
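############################################
# Note on the method: KNN (Sun et al. 2022) scores a test sample by the
# distance to its k-th nearest neighbor among L2-normalized training
# features, making no distributional assumption about the feature space.
# A minimal sketch with faiss (illustrative, not the exact OpenOOD
# postprocessor; k = 50 follows the paper's CIFAR setting):
#
#     import faiss
#     import numpy as np
#
#     def knn_score(train_feats, test_feats, k: int = 50):
#         train = train_feats / np.linalg.norm(train_feats, axis=1,
#                                              keepdims=True)
#         test = test_feats / np.linalg.norm(test_feats, axis=1,
#                                            keepdims=True)
#         index = faiss.IndexFlatL2(train.shape[1])
#         index.add(train.astype(np.float32))
#         dists, _ = index.search(test.astype(np.float32), k)
#         # smaller k-th NN distance => more in-distribution
#         return -dists[:, -1]
############################################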
configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/knn.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor knn \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor knn \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/knn/mnist_test_ood_knn.sh b/OpenOOD/scripts/ood/knn/mnist_test_ood_knn.sh new file mode 100644 index 0000000000000000000000000000000000000000..3d42ede6f577e10be92e369254b7f562ebe7c138 --- /dev/null +++ b/OpenOOD/scripts/ood/knn/mnist_test_ood_knn.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/knn/mnist_test_ood_knn.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/knn.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/knn/mnist_test_osr_knn.sh b/OpenOOD/scripts/ood/knn/mnist_test_osr_knn.sh new file mode 100644 index 0000000000000000000000000000000000000000..032d47a62b2beb23da99bdee91c644b7b54df3b1 --- /dev/null +++ b/OpenOOD/scripts/ood/knn/mnist_test_osr_knn.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/knn/mnist_test_osr_knn.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/knn.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/knn/sweep_osr.py b/OpenOOD/scripts/ood/knn/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..f2c679635b80e58f9484ec4530cebaf31c9c7e3a --- /dev/null +++ b/OpenOOD/scripts/ood/knn/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/knn/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 
'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/knn.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/logitnorm/cifar100_test_logitnorm.sh b/OpenOOD/scripts/ood/logitnorm/cifar100_test_logitnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..de64775e4792eb7334022f62a29024d2fa5c3e60 --- /dev/null +++ b/OpenOOD/scripts/ood/logitnorm/cifar100_test_logitnorm.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/logitnorm/cifar100_test_logitnorm.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_logitnorm_e100_lr0.1_alpha0.04_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/logitnorm/cifar100_train_logitnorm.sh b/OpenOOD/scripts/ood/logitnorm/cifar100_train_logitnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..ab60a4827d14e9b731ff55cc97ab5a58f2ce4fd1 --- /dev/null +++ b/OpenOOD/scripts/ood/logitnorm/cifar100_train_logitnorm.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# sh scripts/ood/logitnorm/cifar100_train_logitnorm.sh + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_logitnorm.yml \ + configs/preprocessors/base_preprocessor.yml \ + --seed 0 diff --git a/OpenOOD/scripts/ood/logitnorm/cifar10_test_logitnorm.sh b/OpenOOD/scripts/ood/logitnorm/cifar10_test_logitnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..b0296a1bd57600aa6721b404ac5cb1e8464f7081 --- /dev/null +++ b/OpenOOD/scripts/ood/logitnorm/cifar10_test_logitnorm.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/logitnorm/cifar10_test_logitnorm.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_logitnorm_e100_lr0.1_alpha0.04_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/logitnorm/cifar10_train_logitnorm.sh b/OpenOOD/scripts/ood/logitnorm/cifar10_train_logitnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..b0b24aea63d17dcdc36e5074c13e519b5bd4d70f --- /dev/null +++ b/OpenOOD/scripts/ood/logitnorm/cifar10_train_logitnorm.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# sh scripts/ood/logitnorm/cifar10_train_logitnorm.sh + +python main.py \ + --config 
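############################################
# Note on the method: LogitNorm (Wei et al. 2022) is a training-time fix
# for overconfidence: the logit vector is L2-normalized and divided by a
# temperature (the alpha=0.04 visible in the result paths above) before
# the cross-entropy loss; at test time the plain MSP postprocessor is
# applied, which is why the eval commands pass --postprocessor msp.
# Sketch of the loss (illustrative):
#
#     import torch
#     import torch.nn.functional as F
#
#     def logitnorm_loss(logits, target, alpha: float = 0.04):
#         norm = logits.norm(p=2, dim=-1, keepdim=True) + 1e-7
#         return F.cross_entropy(logits / (norm * alpha), target)
############################################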
configs/datasets/cifar10/cifar10.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_logitnorm.yml \ + configs/preprocessors/base_preprocessor.yml \ + --seed 0 diff --git a/OpenOOD/scripts/ood/logitnorm/imagenet200_test_logitnorm.sh b/OpenOOD/scripts/ood/logitnorm/imagenet200_test_logitnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..4ab9e87de3a9ee2597b5e5adbfeaea6142adccee --- /dev/null +++ b/OpenOOD/scripts/ood/logitnorm/imagenet200_test_logitnorm.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/logitnorm/imagenet200_test_logitnorm.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_logitnorm_e90_lr0.1_alpha0.04_default \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_logitnorm_e90_lr0.1_alpha0.04_default \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/logitnorm/imagenet200_train_logitnorm.sh b/OpenOOD/scripts/ood/logitnorm/imagenet200_train_logitnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..f7bae86811c6617c69b4f4c84f842ef32ba8bd96 --- /dev/null +++ b/OpenOOD/scripts/ood/logitnorm/imagenet200_train_logitnorm.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# sh scripts/ood/logitnorm/imagenet200_train_logitnorm.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/train_logitnorm.yml \ + configs/preprocessors/base_preprocessor.yml \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/logitnorm/imagenet_test_logitnorm.sh b/OpenOOD/scripts/ood/logitnorm/imagenet_test_logitnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..9aa240eb45abb1e59915796aef7a0ef14f0481ab --- /dev/null +++ b/OpenOOD/scripts/ood/logitnorm/imagenet_test_logitnorm.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/ood/logitnorm/imagenet_test_logitnorm.sh + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_logitnorm_e30_lr0.001_alpha0.04_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_logitnorm_e30_lr0.001_alpha0.04_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/logitnorm/imagenet_train_logitnorm.sh b/OpenOOD/scripts/ood/logitnorm/imagenet_train_logitnorm.sh new file mode 100644 index 0000000000000000000000000000000000000000..0a1d01b352c6c4866a76641dc9ca7f8b7861a111 --- /dev/null +++ b/OpenOOD/scripts/ood/logitnorm/imagenet_train_logitnorm.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# sh scripts/ood/logitnorm/imagenet_train_logitnorm.sh + +python main.py \ + --config 
configs/datasets/imagenet/imagenet.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/train/train_logitnorm.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/mcd/cifar100_test_mcd.sh b/OpenOOD/scripts/ood/mcd/cifar100_test_mcd.sh new file mode 100644 index 0000000000000000000000000000000000000000..13b4d6bbc350f14090118f6dbebe208ef9b2551b --- /dev/null +++ b/OpenOOD/scripts/ood/mcd/cifar100_test_mcd.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ood/mcd/cifar100_test_mcd.sh + +# NOTE!!!! +# need to manually change the checkpoint path +# remember to use the last_*.ckpt because mcd only trains for the last 10 epochs +# and the best.ckpt (according to accuracy) is typically not within the last 10 epochs +# therefore using best.ckpt is equivalent to early stopping with standard cross-entropy loss +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/mcd_net.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/test/test_ood.yml \ + configs/postprocessors/mcd.yml \ + --network.backbone.name resnet18_32x32 \ + --network.pretrained True \ + --network.checkpoint 'results/cifar100_oe_mcd_mcd_e100_lr0.1_default/s0/last_epoch100_acc0.7510.ckpt' \ + --num_workers 8 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/mcd/cifar100_train_mcd.sh b/OpenOOD/scripts/ood/mcd/cifar100_train_mcd.sh new file mode 100644 index 0000000000000000000000000000000000000000..fd880ea28f4f57427158ac7660c45a0811f402c1 --- /dev/null +++ b/OpenOOD/scripts/ood/mcd/cifar100_train_mcd.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/ood/mcd/cifar100_train_mcd.sh + +GPU=1 +CPU=1 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_oe.yml \ + configs/networks/mcd_net.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_mcd.yml \ + --network.backbone.name resnet18_32x32 \ + --network.pretrained False \ + --dataset.image_size 32 \ + --optimizer.num_epochs 100 \ + --num_workers 8 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/mcd/cifar10_test_mcd.sh b/OpenOOD/scripts/ood/mcd/cifar10_test_mcd.sh new file mode 100644 index 0000000000000000000000000000000000000000..6620e3e46a07b6beb8a21687079d2816129e0a62 --- /dev/null +++ b/OpenOOD/scripts/ood/mcd/cifar10_test_mcd.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ood/mcd/cifar10_test_mcd.sh + +# NOTE!!!! 
+# need to manually change the checkpoint path +# remember to use the last_*.ckpt because mcd only trains for the last 10 epochs +# and the best.ckpt (according to accuracy) is typically not within the last 10 epochs +# therefore using best.ckpt is equivalent to early stopping with standard cross-entropy loss +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/mcd_net.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/test/test_ood.yml \ + configs/postprocessors/mcd.yml \ + --network.backbone.name resnet18_32x32 \ + --network.pretrained True \ + --network.checkpoint 'results/cifar10_oe_mcd_mcd_e100_lr0.1_default/s0/last_epoch100_acc0.9420.ckpt' \ + --num_workers 8 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/mcd/cifar10_train_mcd.sh b/OpenOOD/scripts/ood/mcd/cifar10_train_mcd.sh new file mode 100644 index 0000000000000000000000000000000000000000..823e9e5e93f28116b86d29dd37c8060f506bfe69 --- /dev/null +++ b/OpenOOD/scripts/ood/mcd/cifar10_train_mcd.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/ood/mcd/cifar10_train_mcd.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +-w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_oe.yml \ + configs/networks/mcd_net.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_mcd.yml \ + --network.backbone.name resnet18_32x32 \ + --network.pretrained False \ + --dataset.image_size 32 \ + --optimizer.num_epochs 100 \ + --num_workers 8 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mcd/imagenet200_test_mcd.sh b/OpenOOD/scripts/ood/mcd/imagenet200_test_mcd.sh new file mode 100644 index 0000000000000000000000000000000000000000..de1429fa3afc6f7e9418f7020ce75f446d4a075f --- /dev/null +++ b/OpenOOD/scripts/ood/mcd/imagenet200_test_mcd.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ood/mcd/imagenet200_test_mcd.sh + +# NOTE!!!! 
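For context on the mcd.yml postprocessor these test scripts configure: mcd_net carries two classifier heads, and OOD-ness is read off from how strongly the heads disagree. A minimal sketch, assuming the network returns one logit tensor per head (illustrative names, not OpenOOD's exact interface):

import torch.nn.functional as F

def mcd_score(logits1, logits2):
    # the two heads are trained to disagree on outlier-exposure data while
    # agreeing on ID data, so the negated L1 discrepancy between their
    # softmax outputs serves as the ID confidence score
    p1 = F.softmax(logits1, dim=-1)
    p2 = F.softmax(logits2, dim=-1)
    return -(p1 - p2).abs().sum(dim=-1)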
+# need to manually change the checkpoint path +# remember to use the last_*.ckpt because mcd only trains for the last 10 epochs +# and the best.ckpt (according to accuracy) is typically not within the last 10 epochs +# therefore using best.ckpt is equivalent to early stopping with standard cross-entropy loss +SCHEME="ood" # "ood" or "fsood" +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/datasets/imagenet200/imagenet200_${SCHEME}.yml \ + configs/networks/mcd_net.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/test/test_ood.yml \ + configs/postprocessors/mcd.yml \ + --network.backbone.name resnet18_224x224 \ + --network.pretrained True \ + --network.checkpoint 'results/imagenet200_oe_mcd_mcd_e90_lr0.1_default/s0/last_epoch90_acc0.8410.ckpt' \ + --num_workers 8 \ + --evaluator.ood_scheme ${SCHEME} \ + --seed 0 diff --git a/OpenOOD/scripts/ood/mcd/imagenet200_train_mcd.sh b/OpenOOD/scripts/ood/mcd/imagenet200_train_mcd.sh new file mode 100644 index 0000000000000000000000000000000000000000..19bfaa2e8a81e5f0d03604de9ca5cadaba29a8e5 --- /dev/null +++ b/OpenOOD/scripts/ood/mcd/imagenet200_train_mcd.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/ood/mcd/imagenet200_train_mcd.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/datasets/imagenet200/imagenet200_oe.yml \ + configs/networks/mcd_net.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_mcd.yml \ + --network.backbone.name resnet18_224x224 \ + --network.pretrained False \ + --trainer.start_epoch_ft 80 \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/mds/cifar100_test_ood_mds.sh b/OpenOOD/scripts/ood/mds/cifar100_test_ood_mds.sh new file mode 100644 index 0000000000000000000000000000000000000000..9e87afc3d7b3a778ddc4b2110c0f56f83b72c201 --- /dev/null +++ b/OpenOOD/scripts/ood/mds/cifar100_test_ood_mds.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# sh scripts/ood/mds/cifar100_test_ood_mds.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds.yml \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor mds \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/mds/cifar10_test_ood_mds.sh b/OpenOOD/scripts/ood/mds/cifar10_test_ood_mds.sh new file mode 100644 index 0000000000000000000000000000000000000000..5a1136debd0b29283066d2e00c065779430ca2e2 --- /dev/null +++ b/OpenOOD/scripts/ood/mds/cifar10_test_ood_mds.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# sh 
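The mds.yml postprocessor in the following scripts is the Mahalanobis distance score: fit one Gaussian per class with a tied covariance over penultimate-layer features, then score a test sample by its distance to the closest class mean. A rough NumPy sketch (illustrative names, not OpenOOD's exact implementation):

import numpy as np

def fit_mds(feats, labels, num_classes):
    # class-conditional means plus a single covariance shared by all classes,
    # estimated from in-distribution training features
    means = np.stack([feats[labels == c].mean(0) for c in range(num_classes)])
    centered = feats - means[labels]
    precision = np.linalg.pinv(centered.T @ centered / len(feats))
    return means, precision

def mds_score(feat, means, precision):
    # negative Mahalanobis distance to the nearest class mean
    dists = [(feat - m) @ precision @ (feat - m) for m in means]
    return -min(dists)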
scripts/ood/mds/cifar10_test_ood_mds.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 1 \ + --postprocessor.postprocessor_args.temperature 1 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor mds \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/mds/imagenet200_test_ood_mds.sh b/OpenOOD/scripts/ood/mds/imagenet200_test_ood_mds.sh new file mode 100644 index 0000000000000000000000000000000000000000..83a222913e95f7048bc9a89050931275b7f012cc --- /dev/null +++ b/OpenOOD/scripts/ood/mds/imagenet200_test_ood_mds.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mds/imagenet200_test_ood_mds.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor mds \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor mds \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/mds/imagenet_test_ood_mds.sh b/OpenOOD/scripts/ood/mds/imagenet_test_ood_mds.sh new file mode 100644 index 0000000000000000000000000000000000000000..2d37a120558e93342d255e6d20362841345203fa --- /dev/null +++ b/OpenOOD/scripts/ood/mds/imagenet_test_ood_mds.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/mds/imagenet_test_ood_mds.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, 
vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor mds \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor mds \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/mds_ensemble/cifar100_test_ood_mds_ensemble.sh b/OpenOOD/scripts/ood/mds_ensemble/cifar100_test_ood_mds_ensemble.sh new file mode 100644 index 0000000000000000000000000000000000000000..6bf3b25749c6e6e7423e8af13cbcdc818b05a694 --- /dev/null +++ b/OpenOOD/scripts/ood/mds_ensemble/cifar100_test_ood_mds_ensemble.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/mds_ensemble/cifar100_test_ood_mds_ensemble.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds_ensemble.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor mds_ensemble \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/mds_ensemble/cifar10_test_ood_mds_ensemble.sh b/OpenOOD/scripts/ood/mds_ensemble/cifar10_test_ood_mds_ensemble.sh new file mode 100644 index 0000000000000000000000000000000000000000..04574b8e548964f91affc642f46bc236d8cac74a --- /dev/null +++ b/OpenOOD/scripts/ood/mds_ensemble/cifar10_test_ood_mds_ensemble.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/mds_ensemble/cifar10_test_ood_mds_ensemble.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds_ensemble.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor mds_ensemble \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/mds_ensemble/imagenet200_test_ood_mds_ensemble.sh b/OpenOOD/scripts/ood/mds_ensemble/imagenet200_test_ood_mds_ensemble.sh new file mode 
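The mds_ensemble.yml variant used in the next scripts computes the same Mahalanobis score at several intermediate layers and combines them; in the original formulation the per-layer weights come from a logistic regression fit on validation data. Schematically (a sketch, not OpenOOD's exact code):

def mds_ensemble_score(layer_scores, weights):
    # layer_scores: per-layer Mahalanobis scores for one sample;
    # weights: combination coefficients, typically fit by logistic
    # regression on a small labeled validation split
    return sum(w * s for w, s in zip(weights, layer_scores))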
100644 index 0000000000000000000000000000000000000000..f4be686d5af1703c73a1e5bb9fdd670fbf4cf0e9 --- /dev/null +++ b/OpenOOD/scripts/ood/mds_ensemble/imagenet200_test_ood_mds_ensemble.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mds_ensemble/imagenet200_test_ood_mds_ensemble.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor mds_ensemble \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor mds_ensemble \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/mds_ensemble/imagenet_test_ood_mds.sh b/OpenOOD/scripts/ood/mds_ensemble/imagenet_test_ood_mds.sh new file mode 100644 index 0000000000000000000000000000000000000000..5c5a74544068b26403e4199326f1bdc44e24a7a9 --- /dev/null +++ b/OpenOOD/scripts/ood/mds_ensemble/imagenet_test_ood_mds.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/mds_ensemble/imagenet_test_ood_mds.sh + +GPU=1 +CPU=1 +node=39 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds_ensemble.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor mds_ensemble \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor mds_ensemble \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/mds_ensemble/mnist_test_ood_mds_ensemble.sh b/OpenOOD/scripts/ood/mds_ensemble/mnist_test_ood_mds_ensemble.sh new file mode 100644 index 0000000000000000000000000000000000000000..df4f056c49cf31e7899606371df44deac4f38d29 --- /dev/null +++ b/OpenOOD/scripts/ood/mds_ensemble/mnist_test_ood_mds_ensemble.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mds_ensemble/mnist_test_ood_mds_ensemble.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/mnist/mnist.yml \ + configs/datasets/mnist/mnist_ood.yml \ + configs/networks/lenet.yml \ + configs/pipelines/test/test_ood.yml \ + 
configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds_ensemble.yml \ + --num_workers 8 \ + --network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ + --mark 0 diff --git a/OpenOOD/scripts/ood/mds_ensemble/mnist_test_osr_mds_ensemble.sh b/OpenOOD/scripts/ood/mds_ensemble/mnist_test_osr_mds_ensemble.sh new file mode 100644 index 0000000000000000000000000000000000000000..1dd000a223cdef955ca052bdc3f8a057bdc56ad1 --- /dev/null +++ b/OpenOOD/scripts/ood/mds_ensemble/mnist_test_osr_mds_ensemble.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mds_ensemble/mnist_test_osr_mds_ensemble.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/osr_mnist6/mnist6_seed1.yml \ + configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ + configs/networks/lenet.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds_ensemble.yml \ + --num_workers 8 \ + --network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ + --mark 0 diff --git a/OpenOOD/scripts/ood/mds_ensemble/sweep_osr.py b/OpenOOD/scripts/ood/mds_ensemble/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..a824fcc634d41dff4db36dfcfbe504bfb583390e --- /dev/null +++ b/OpenOOD/scripts/ood/mds_ensemble/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/mds_ensemble/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mds.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/mixoe/cifar100_test_mixoe.sh b/OpenOOD/scripts/ood/mixoe/cifar100_test_mixoe.sh new file mode 100644 index 0000000000000000000000000000000000000000..426c3ebe31c06967f268691112681bf2084b6a32 --- /dev/null +++ b/OpenOOD/scripts/ood/mixoe/cifar100_test_mixoe.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/mixoe/cifar100_test_mixoe.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root 
./results/cifar100_oe_resnet18_32x32_mixoe_e10_lr0.001_alpha0.1_beta1.0_cutmix_lam1.0_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/mixoe/cifar100_train_mixoe.sh b/OpenOOD/scripts/ood/mixoe/cifar100_train_mixoe.sh new file mode 100644 index 0000000000000000000000000000000000000000..b132491c77879ab7e07502f0c6b432e736d5ab5c --- /dev/null +++ b/OpenOOD/scripts/ood/mixoe/cifar100_train_mixoe.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/ood/mixoe/cifar100_train_mixoe.sh + +SEED=0 +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_oe.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_mixoe.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 10 \ + --dataset.train.batch_size 128 \ + --dataset.oe.batch_size 128 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mixoe/cifar10_test_mixoe.sh b/OpenOOD/scripts/ood/mixoe/cifar10_test_mixoe.sh new file mode 100644 index 0000000000000000000000000000000000000000..863c28aecfee8cff3b84d27548d312fa9d2ee351 --- /dev/null +++ b/OpenOOD/scripts/ood/mixoe/cifar10_test_mixoe.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/mixoe/cifar10_test_mixoe.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_oe_resnet18_32x32_mixoe_e10_lr0.001_alpha0.1_beta1.0_cutmix_lam1.0_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/mixoe/cifar10_train_mixoe.sh b/OpenOOD/scripts/ood/mixoe/cifar10_train_mixoe.sh new file mode 100644 index 0000000000000000000000000000000000000000..02fe440b6ebe09f8b2db5c02d80baf143ac7de06 --- /dev/null +++ b/OpenOOD/scripts/ood/mixoe/cifar10_train_mixoe.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/ood/mixoe/cifar10_train_mixoe.sh + +SEED=0 +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_oe.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_mixoe.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 10 \ + --dataset.train.batch_size 128 \ + --dataset.oe.batch_size 128 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mixoe/imagenet200_test_mixoe.sh b/OpenOOD/scripts/ood/mixoe/imagenet200_test_mixoe.sh new file mode 100644 index 0000000000000000000000000000000000000000..e3b2d254988c451e365da30f94d6aca10aaeffb2 --- /dev/null +++ b/OpenOOD/scripts/ood/mixoe/imagenet200_test_mixoe.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mixoe/imagenet200_test_mixoe.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root 
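The train_mixoe.yml pipeline behind these scripts fine-tunes a pretrained classifier on mixtures of ID and outlier-exposure (OE) batches; per the run names, the released configs use the cutmix variant with lam 1.0, alpha 0.1, and beta 1.0 (beta weights the OE loss term and is omitted below). A simplified mixup-style sketch of the idea, with illustrative names and a symmetric Beta(alpha, alpha) mixing ratio:

import torch

def mixoe_batch(x_id, y_onehot, x_oe, alpha=0.1, num_classes=100):
    # mix each ID image with an OE image; OE samples carry a uniform label,
    # so the mixed target interpolates toward 1/num_classes
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    x = lam * x_id + (1 - lam) * x_oe[: x_id.size(0)]
    y_uniform = torch.full_like(y_onehot, 1.0 / num_classes)
    y = lam * y_onehot + (1 - lam) * y_uniform
    return x, y

The cutmix variant named in the run directories pastes a rectangular patch of the OE image instead of blending whole pixels, but the soft-label logic is the same.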
./results/imagenet200_oe_resnet18_224x224_mixoe_e10_lr0.001_alpha0.1_beta1.0_cutmix_lam1.0_default \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_oe_resnet18_224x224_mixoe_e10_lr0.001_alpha0.1_beta1.0_cutmix_lam1.0_default \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/mixoe/imagenet200_train_mixoe.sh b/OpenOOD/scripts/ood/mixoe/imagenet200_train_mixoe.sh new file mode 100644 index 0000000000000000000000000000000000000000..c75a71e61663762dfd474a26688af5b25b9b8564 --- /dev/null +++ b/OpenOOD/scripts/ood/mixoe/imagenet200_train_mixoe.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ood/mixoe/imagenet200_train_mixoe.sh + +SEED=0 +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/datasets/imagenet200/imagenet200_oe.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_mixoe.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 10 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mls/cifar100_test_ood_maxlogit.sh b/OpenOOD/scripts/ood/mls/cifar100_test_ood_maxlogit.sh new file mode 100644 index 0000000000000000000000000000000000000000..4a909a8bdb9739d4a4ea26f2af7a7713284028d1 --- /dev/null +++ b/OpenOOD/scripts/ood/mls/cifar100_test_ood_maxlogit.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/mls/cifar100_test_ood_maxlogit.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mls.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor mls \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/mls/cifar10_test_ood_maxlogit.sh b/OpenOOD/scripts/ood/mls/cifar10_test_ood_maxlogit.sh new file mode 100644 index 0000000000000000000000000000000000000000..a8897bed8bd818409c5e2a406e4a65e425e9e6ec --- /dev/null +++ b/OpenOOD/scripts/ood/mls/cifar10_test_ood_maxlogit.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/mls/cifar10_test_ood_maxlogit.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ + --config 
configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mls.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor mls \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/mls/imagenet200_test_ood_maxlogit.sh b/OpenOOD/scripts/ood/mls/imagenet200_test_ood_maxlogit.sh new file mode 100644 index 0000000000000000000000000000000000000000..bcfca8cc02919d748272d5e020b53ed68fbe67e2 --- /dev/null +++ b/OpenOOD/scripts/ood/mls/imagenet200_test_ood_maxlogit.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mls/imagenet200_test_ood_maxlogit.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor mls \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor mls \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/mls/imagenet_test_ood_maxlogit.sh b/OpenOOD/scripts/ood/mls/imagenet_test_ood_maxlogit.sh new file mode 100644 index 0000000000000000000000000000000000000000..d63911837aad8eac78196e1c84f42eff3b3cdfaf --- /dev/null +++ b/OpenOOD/scripts/ood/mls/imagenet_test_ood_maxlogit.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/mls/imagenet_test_ood_maxlogit.sh + +GPU=1 +CPU=1 +node=39 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mls.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor mls \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor mls \ + --save-score --save-csv --fsood diff --git 
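mls.yml in these scripts is the MaxLogit score: rank samples by the largest unnormalized logit rather than the softmax maximum. As a one-liner sketch (PyTorch-style, illustrative):

def max_logit_score(logits):
    # MLS: the maximum raw logit; larger means more in-distribution.
    # Unlike MSP, this keeps the magnitude information that softmax
    # normalization throws away, which helps on large label spaces.
    return logits.max(dim=-1).values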
a/OpenOOD/scripts/ood/mls/mnist_test_ood_maxlogit.sh b/OpenOOD/scripts/ood/mls/mnist_test_ood_maxlogit.sh new file mode 100644 index 0000000000000000000000000000000000000000..988f01c1235ab7e0ac0135bd0be488844496ae68 --- /dev/null +++ b/OpenOOD/scripts/ood/mls/mnist_test_ood_maxlogit.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mls/mnist_test_ood_maxlogit.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/mls.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/mls/mnist_test_osr_maxlogit.sh b/OpenOOD/scripts/ood/mls/mnist_test_osr_maxlogit.sh new file mode 100644 index 0000000000000000000000000000000000000000..4793eb874e1d792cbc0a3c29edcda565e412e6fd --- /dev/null +++ b/OpenOOD/scripts/ood/mls/mnist_test_osr_maxlogit.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mls/mnist_test_osr_maxlogit.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/mls.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/mls/sweep_osr.py b/OpenOOD/scripts/ood/mls/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..5f11f1d9a2de4e8fdfaea9e1362dd7b4b42ba0d3 --- /dev/null +++ b/OpenOOD/scripts/ood/mls/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/mls/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/mls.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/mos/cifar100_test_mos.sh 
b/OpenOOD/scripts/ood/mos/cifar100_test_mos.sh new file mode 100644 index 0000000000000000000000000000000000000000..829ea7585066e55e257b66e4b2ca5eb3d3b167aa --- /dev/null +++ b/OpenOOD/scripts/ood/mos/cifar100_test_mos.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ood/mos/cifar100_test_mos.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ + +SEED=0 +python main.py \ + --config configs/datasets/cifar100/cifar100_double_label.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_mos.yml \ + configs/postprocessors/mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint results/cifar100_double_label_resnet18_32x32_mos_e30_lr0.003/s${SEED}/best.ckpt \ + --num_workers 8 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mos/cifar100_train_mos.sh b/OpenOOD/scripts/ood/mos/cifar100_train_mos.sh new file mode 100644 index 0000000000000000000000000000000000000000..1717cc51db0fcbe2559cdbe5de606805b05b822c --- /dev/null +++ b/OpenOOD/scripts/ood/mos/cifar100_train_mos.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/mos/cifar100_train_mos.sh + +# GPU=1 +# CPU=0 + + +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ + +SEED=0 +python main.py \ + --config configs/datasets/cifar100/cifar100_double_label.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \ + --optimizer.num_epochs 30 \ + --merge_option merge \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mos/cifar10_test_mos.sh b/OpenOOD/scripts/ood/mos/cifar10_test_mos.sh new file mode 100644 index 0000000000000000000000000000000000000000..38360d3e90c036c011c2e5246faf5ca261bb143d --- /dev/null +++ b/OpenOOD/scripts/ood/mos/cifar10_test_mos.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ood/mos/cifar10_test_mos.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ + +SEED=0 +python main.py \ + --config configs/datasets/cifar10/cifar10_double_label.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_mos.yml \ + configs/postprocessors/mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint results/cifar10_double_label_resnet18_32x32_mos_e30_lr0.003/s${SEED}/best.ckpt \ + --num_workers 8 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mos/cifar10_train_mos.sh b/OpenOOD/scripts/ood/mos/cifar10_train_mos.sh new file mode 100644 index 0000000000000000000000000000000000000000..be3d18c2399c6816ef74d54ba9fbb2fe757bdae6 --- /dev/null +++ b/OpenOOD/scripts/ood/mos/cifar10_train_mos.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/ood/mos/cifar10_train_mos.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ + +SEED=0 +python main.py \ + --config configs/datasets/cifar10/cifar10_double_label.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \ + --optimizer.num_epochs 30 \ + --merge_option merge \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mos/imagenet200_test_mos.sh 
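The MOS pipelines rely on the *_double_label.yml dataset configs because every class carries both a fine label and a group label: the classifier predicts within class groups, each group gets an extra 'others' entry, and OOD-ness is the smallest 'others' probability across groups. A minimal sketch of the scoring rule, assuming the network emits one logit block per group with the 'others' logit in the last column (illustrative layout):

import torch
import torch.nn.functional as F

def mos_score(group_logits):
    # group_logits: list of [batch, n_classes_in_group + 1] tensors
    others = [F.softmax(g, dim=-1)[:, -1] for g in group_logits]
    # an ID sample should drive 'others' near zero in its own group,
    # so negate the per-sample minimum to get an ID confidence score
    return -torch.stack(others, dim=1).min(dim=1).values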
b/OpenOOD/scripts/ood/mos/imagenet200_test_mos.sh new file mode 100644 index 0000000000000000000000000000000000000000..c66b9330d86c9a0d04d51ca05c1542be9f33c676 --- /dev/null +++ b/OpenOOD/scripts/ood/mos/imagenet200_test_mos.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# sh scripts/ood/mos/imagenet200_test_mos.sh + +SEED=0 + +# ood +python main.py \ + --config configs/datasets/imagenet200/imagenet200_double_label.yml \ + configs/datasets/imagenet200/imagenet200_ood.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/test/test_mos.yml \ + configs/postprocessors/mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint results/imagenet200_double_label_resnet18_224x224_mos_e10_lr0.003/s${SEED}/best.ckpt \ + --num_workers 8 \ + --seed ${SEED} + +# full-spectrum ood +python main.py \ + --config configs/datasets/imagenet200/imagenet200_double_label.yml \ + configs/datasets/imagenet200/imagenet200_double_label_fsood.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/test/test_mos.yml \ + configs/postprocessors/mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint results/imagenet200_double_label_resnet18_224x224_mos_e10_lr0.003/s${SEED}/best.ckpt \ + --evaluator.ood_scheme fsood \ + --num_workers 8 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mos/imagenet200_train_mos.sh b/OpenOOD/scripts/ood/mos/imagenet200_train_mos.sh new file mode 100644 index 0000000000000000000000000000000000000000..940d4968da637f103bd1d0e831b7dc3044e0dc87 --- /dev/null +++ b/OpenOOD/scripts/ood/mos/imagenet200_train_mos.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# sh scripts/ood/mos/imagenet200_train_mos.sh + +SEED=0 +python main.py \ + --config configs/datasets/imagenet200/imagenet200_double_label.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/train_mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt \ + --optimizer.num_epochs 10 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed ${SEED} diff --git a/OpenOOD/scripts/ood/mos/imagenet_test_mos.sh b/OpenOOD/scripts/ood/mos/imagenet_test_mos.sh new file mode 100644 index 0000000000000000000000000000000000000000..64e08ab980dc90e274d910760e669e3db2db3634 --- /dev/null +++ b/OpenOOD/scripts/ood/mos/imagenet_test_mos.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# sh scripts/ood/mos/imagenet_test_mos.sh + +SEED=0 + +# ood +python main.py \ + --config configs/datasets/imagenet/imagenet_double_label.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_mos.yml \ + configs/postprocessors/mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint results/imagenet_double_label_resnet50_mos_e5_lr0.003/s0/best.ckpt \ + --num_workers 8 \ + --seed 0 + +# full-spectrum ood +python main.py \ + --config configs/datasets/imagenet/imagenet_double_label.yml \ + configs/datasets/imagenet/imagenet_double_label_fsood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_mos.yml \ + configs/postprocessors/mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint results/imagenet_double_label_resnet50_mos_e5_lr0.003/s0/best.ckpt \ + --num_workers 8 \ + --seed 0 \ + 
--evaluator.ood_scheme fsood diff --git a/OpenOOD/scripts/ood/mos/imagenet_train_mos.sh b/OpenOOD/scripts/ood/mos/imagenet_train_mos.sh new file mode 100644 index 0000000000000000000000000000000000000000..21c02186debf7a3fe053741c78dd3a1adbce7e87 --- /dev/null +++ b/OpenOOD/scripts/ood/mos/imagenet_train_mos.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# sh scripts/ood/mos/imagenet_train_mos.sh + +python main.py \ + --config configs/datasets/imagenet/imagenet_double_label.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/train/train_mos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained True \ + --network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --optimizer.num_epochs 5 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/msp/cifar100_test_ood_msp.sh b/OpenOOD/scripts/ood/msp/cifar100_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..0009016c83b163b207433569b4bca4353c83f24b --- /dev/null +++ b/OpenOOD/scripts/ood/msp/cifar100_test_ood_msp.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# sh scripts/ood/msp/cifar100_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/msp/cifar10_test_ood_msp.sh b/OpenOOD/scripts/ood/msp/cifar10_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..440b301bcef42afa682655de0c52cb84151ec4d7 --- /dev/null +++ b/OpenOOD/scripts/ood/msp/cifar10_test_ood_msp.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/msp/cifar10_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 8 \ + --network.checkpoint './results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 \ + --merge_option merge + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from 
+# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/msp/imagenet200_test_ood_msp.sh b/OpenOOD/scripts/ood/msp/imagenet200_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..3ef27b6d9aeed9f0af0e824f254368225deefe41 --- /dev/null +++ b/OpenOOD/scripts/ood/msp/imagenet200_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/msp/imagenet200_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/msp/imagenet_test_ood_msp.sh b/OpenOOD/scripts/ood/msp/imagenet_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..065ececca3d5906f363f95edfd2bbc58fa17acd9 --- /dev/null +++ b/OpenOOD/scripts/ood/msp/imagenet_test_ood_msp.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# sh scripts/ood/msp/imagenet_test_ood_msp.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 10 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/msp/mnist_test_fsood_msp.sh b/OpenOOD/scripts/ood/msp/mnist_test_fsood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..cfc2c9958305c62a689f5823e4f01d8361975aa3 --- /dev/null +++ b/OpenOOD/scripts/ood/msp/mnist_test_fsood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/msp/mnist_test_fsood_msp.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config 
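msp.yml is the maximum-softmax-probability baseline: the ID score is simply the classifier's top-1 softmax confidence. Sketch:

import torch.nn.functional as F

def msp_score(logits):
    # MSP baseline (the reference point every other postprocessor
    # in this directory is compared against)
    return F.softmax(logits, dim=-1).max(dim=-1).values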
configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_fsood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_fsood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc98.50.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/msp/mnist_test_ood_msp.sh b/OpenOOD/scripts/ood/msp/mnist_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..d84d9a10bbe6b2d1f07bf0ea679f3df24d55c9de --- /dev/null +++ b/OpenOOD/scripts/ood/msp/mnist_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/msp/mnist_test_ood_msp.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc98.50.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/msp/osr_cifar6_test_msp.sh b/OpenOOD/scripts/ood/msp/osr_cifar6_test_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..e6bf4a0701d10340820b21ab65f7312c7b4f4da5 --- /dev/null +++ b/OpenOOD/scripts/ood/msp/osr_cifar6_test_msp.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ood/msp/osr_cifar6_test_msp.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/osr_cifar6/cifar6_seed1.yml \ +configs/datasets/osr_cifar6/cifar6_seed1.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_acc.yml \ +--num_workers 8 \ +--network.checkpoint './results/checkpoints/osr/cifar6_seed1.ckpt' diff --git a/OpenOOD/scripts/ood/msp/osr_mnist6_test_msp.sh b/OpenOOD/scripts/ood/msp/osr_mnist6_test_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..018d1ce3f2cf88bf764b7798d56f6e0c937499c0 --- /dev/null +++ b/OpenOOD/scripts/ood/msp/osr_mnist6_test_msp.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ood/msp/osr_mnist6_test_msp.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/msp/sweep_osr.py b/OpenOOD/scripts/ood/msp/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..ce2ea968e05a81cf0c077db3135b62c91565d36c --- /dev/null +++ b/OpenOOD/scripts/ood/msp/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/msp/sweep_osr.py +import os + +config = 
[ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/npos/cifar100_test_npos.sh b/OpenOOD/scripts/ood/npos/cifar100_test_npos.sh new file mode 100644 index 0000000000000000000000000000000000000000..2cf90d81ec9aec86ebd8ffe88d22b20d6c0ccd81 --- /dev/null +++ b/OpenOOD/scripts/ood/npos/cifar100_test_npos.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/npos/cifar100_test_npos.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_npos_net_npos_e100_lr0.1_default \ + --postprocessor npos \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/npos/cifar100_train_npos.sh b/OpenOOD/scripts/ood/npos/cifar100_train_npos.sh new file mode 100644 index 0000000000000000000000000000000000000000..0b37ae3f3ec48e5cab2b2b4490121d741812f81c --- /dev/null +++ b/OpenOOD/scripts/ood/npos/cifar100_train_npos.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ood/npos/cifar100_train_npos.sh + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/npos_net.yml \ + configs/pipelines/train/train_npos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --preprocessor.name cider \ + --network.backbone.name resnet18_32x32 \ + --dataset.train.batch_size 256 \ + --trainer.trainer_args.temp 0.1 \ + --trainer.trainer_args.sample_from 600 \ + --trainer.trainer_args.K 300 \ + --trainer.trainer_args.cov_mat 0.1 \ + --trainer.trainer_args.start_epoch_KNN 40 \ + --trainer.trainer_args.ID_points_num 200 \ + --optimizer.num_epochs 100 \ + --optimizer.lr 0.1 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/npos/cifar10_test_npos.sh b/OpenOOD/scripts/ood/npos/cifar10_test_npos.sh new file mode 100644 index 0000000000000000000000000000000000000000..d0a130a8914707240ebb0055eca36a54d7f229c7 --- /dev/null +++ b/OpenOOD/scripts/ood/npos/cifar10_test_npos.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/npos/cifar10_test_npos.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data 
cifar10 \ + --root ./results/cifar10_npos_net_npos_e100_lr0.1_default \ + --postprocessor npos \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/npos/cifar10_train_npos.sh b/OpenOOD/scripts/ood/npos/cifar10_train_npos.sh new file mode 100644 index 0000000000000000000000000000000000000000..1d21a0692c4ecdb390c85a24fe8485f0e0deade5 --- /dev/null +++ b/OpenOOD/scripts/ood/npos/cifar10_train_npos.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/ood/npos/cifar10_train_npos.sh + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/npos_net.yml \ + configs/pipelines/train/train_npos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --preprocessor.name cider \ + --network.backbone.name resnet18_32x32 \ + --dataset.train.batch_size 256 \ + --trainer.trainer_args.temp 0.1 \ + --trainer.trainer_args.sample_from 600 \ + --trainer.trainer_args.K 300 \ + --trainer.trainer_args.cov_mat 0.1 \ + --trainer.trainer_args.start_epoch_KNN 40 \ + --trainer.trainer_args.ID_points_num 200 \ + --optimizer.num_epochs 100 \ + --optimizer.lr 0.1 \ + --seed 0 diff --git a/OpenOOD/scripts/ood/npos/imagenet200_test_npos.sh b/OpenOOD/scripts/ood/npos/imagenet200_test_npos.sh new file mode 100644 index 0000000000000000000000000000000000000000..176e2ede39187c40af43ab68cf2baf29f52bc24b --- /dev/null +++ b/OpenOOD/scripts/ood/npos/imagenet200_test_npos.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/npos/imagenet200_test_npos.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_npos_net_npos_e90_lr0.1_default \ + --postprocessor npos \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_npos_net_npos_e90_lr0.1_default \ + --postprocessor npos \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/npos/imagenet200_train_npos.sh b/OpenOOD/scripts/ood/npos/imagenet200_train_npos.sh new file mode 100644 index 0000000000000000000000000000000000000000..bf2b57edc42bc4fa2ab5bb4b55db0762173dae16 --- /dev/null +++ b/OpenOOD/scripts/ood/npos/imagenet200_train_npos.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/npos/imagenet200_train_npos.sh + +# NPOS trainer cannot work with multiple GPUs (DDP) currently +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/npos_net.yml \ + configs/pipelines/train/train_npos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --preprocessor.name cider \ + --network.backbone.name resnet18_224x224 \ + --dataset.train.batch_size 256 \ + --trainer.trainer_args.temp 0.1 \ + --trainer.trainer_args.sample_from 1000 \ + --trainer.trainer_args.K 400 \ + --trainer.trainer_args.cov_mat 0.1 \ + --trainer.trainer_args.start_epoch_KNN 40 \ + --trainer.trainer_args.ID_points_num 300 \ + --optimizer.num_epochs 90 \ + --optimizer.lr 0.1 \ + --num_gpus 1 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/npos/imagenet_train_npos.sh b/OpenOOD/scripts/ood/npos/imagenet_train_npos.sh new file mode 100644 index 0000000000000000000000000000000000000000..744e03f64d7c2c6f5c5e0d9915db43e46acb51aa --- /dev/null +++ b/OpenOOD/scripts/ood/npos/imagenet_train_npos.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh 
scripts/ood/npos/imagenet_train_npos.sh + +# NPOS trainer cannot work with multiple GPUs (DDP) currently +# we observed a CUDA OOM error on a Quadro RTX 6000 24GB GPU +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/npos_net.yml \ + configs/pipelines/train/train_npos.yml \ + configs/preprocessors/base_preprocessor.yml \ + --preprocessor.name cider \ + --network.backbone.name resnet50 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 128 \ + --trainer.trainer_args.temp 0.1 \ + --trainer.trainer_args.sample_from 1000 \ + --trainer.trainer_args.K 400 \ + --trainer.trainer_args.cov_mat 0.1 \ + --trainer.trainer_args.start_epoch_KNN 1 \ + --trainer.trainer_args.ID_points_num 300 \ + --num_gpus 1 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/odin/cifar100_test_ood_odin.sh b/OpenOOD/scripts/ood/odin/cifar100_test_ood_odin.sh new file mode 100644 index 0000000000000000000000000000000000000000..aad710f3a46f33c9e388c2e2fbf9230169cc297f --- /dev/null +++ b/OpenOOD/scripts/ood/odin/cifar100_test_ood_odin.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/odin/cifar100_test_ood_odin.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/odin.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor odin \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/odin/cifar10_test_ood_odin.sh b/OpenOOD/scripts/ood/odin/cifar10_test_ood_odin.sh new file mode 100644 index 0000000000000000000000000000000000000000..821b90eaaf0809239a292c9868c67d86ed8b48c9 --- /dev/null +++ b/OpenOOD/scripts/ood/odin/cifar10_test_ood_odin.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/odin/cifar10_test_ood_odin.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/odin.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new
unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor odin \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/odin/imagenet200_test_ood_odin.sh b/OpenOOD/scripts/ood/odin/imagenet200_test_ood_odin.sh new file mode 100644 index 0000000000000000000000000000000000000000..b4633448223cc5c2873e410f6f99763b9b46315e --- /dev/null +++ b/OpenOOD/scripts/ood/odin/imagenet200_test_ood_odin.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/odin/imagenet200_test_ood_odin.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor odin \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor odin \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/odin/imagenet_test_ood_odin.sh b/OpenOOD/scripts/ood/odin/imagenet_test_ood_odin.sh new file mode 100644 index 0000000000000000000000000000000000000000..92e2b19147459db11f68a60491322c3b4069c237 --- /dev/null +++ b/OpenOOD/scripts/ood/odin/imagenet_test_ood_odin.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/odin/imagenet_test_ood_odin.sh + +GPU=1 +CPU=1 +node=39 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/odin.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor odin \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor odin \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/odin/mnist_test_ood_odin.sh b/OpenOOD/scripts/ood/odin/mnist_test_ood_odin.sh new file mode 100644 index 0000000000000000000000000000000000000000..f989a6000ce5bd051860f7600d6cdd97bcba4d24 --- /dev/null +++ b/OpenOOD/scripts/ood/odin/mnist_test_ood_odin.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/odin/mnist_test_ood_odin.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# 
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/odin.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/odin/mnist_test_ood_odin_aps.sh b/OpenOOD/scripts/ood/odin/mnist_test_ood_odin_aps.sh new file mode 100644 index 0000000000000000000000000000000000000000..ad9779b3ecd9c16c2b4ec77f6fa7a2e861f6dee8 --- /dev/null +++ b/OpenOOD/scripts/ood/odin/mnist_test_ood_odin_aps.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/odin/mnist_test_ood_odin_aps.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/odin.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc98.50.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/odin/mnist_test_osr_odin.sh b/OpenOOD/scripts/ood/odin/mnist_test_osr_odin.sh new file mode 100644 index 0000000000000000000000000000000000000000..37f8c4423585898bc1ba198c5986a12b748b78fe --- /dev/null +++ b/OpenOOD/scripts/ood/odin/mnist_test_osr_odin.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/odin/mnist_test_osr_odin.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/odin.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/odin/sweep_osr.py b/OpenOOD/scripts/ood/odin/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..aa5bfc90ed099414144abcdfb0f37757cbbf4218 --- /dev/null +++ b/OpenOOD/scripts/ood/odin/sweep_osr.py @@ -0,0 +1,30 @@ +# python scripts/ood/odin/sweep_osr.py +import os + +config = [ + # ['osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt'], + # ['osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt'], + # ['osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt'], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + 
--kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/odin.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/oe/cifar100_test_oe.sh b/OpenOOD/scripts/ood/oe/cifar100_test_oe.sh new file mode 100644 index 0000000000000000000000000000000000000000..c64692008bd90b6a0cecefcec512f3cc04f5815d --- /dev/null +++ b/OpenOOD/scripts/ood/oe/cifar100_test_oe.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# sh scripts/ood/oe/cifar100_test_oe.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_oe_resnet18_32x32_oe_e100_lr0.1_lam0.5_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_oe_resnet18_32x32_oe_e100_lr0.1_lam0.5_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/oe/cifar100_train_oe.sh b/OpenOOD/scripts/ood/oe/cifar100_train_oe.sh new file mode 100644 index 0000000000000000000000000000000000000000..7e376635c1c1410e0a4f4f05a763c48465791df1 --- /dev/null +++ b/OpenOOD/scripts/ood/oe/cifar100_train_oe.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/ood/oe/cifar100_train_oe.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +-w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_oe.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_oe.yml \ + configs/preprocessors/base_preprocessor.yml \ + --seed 0 diff --git a/OpenOOD/scripts/ood/oe/cifar10_test_oe.sh b/OpenOOD/scripts/ood/oe/cifar10_test_oe.sh new file mode 100644 index 0000000000000000000000000000000000000000..d2c8bcfae5361ab1e83ffe95cb894847f4944cd2 --- /dev/null +++ b/OpenOOD/scripts/ood/oe/cifar10_test_oe.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# sh scripts/ood/oe/cifar10_test_oe.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + 
configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_oe_resnet18_32x32_oe_e100_lr0.1_lam0.5_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_oe_resnet18_32x32_oe_e100_lr0.1_lam0.5_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/oe/cifar10_train_oe.sh b/OpenOOD/scripts/ood/oe/cifar10_train_oe.sh new file mode 100644 index 0000000000000000000000000000000000000000..887d13a8d600fa3dc730556e4c9d6c0b7e340464 --- /dev/null +++ b/OpenOOD/scripts/ood/oe/cifar10_train_oe.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/ood/oe/cifar10_train_oe.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +-w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_oe.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_oe.yml \ + configs/preprocessors/base_preprocessor.yml \ + --seed 0 diff --git a/OpenOOD/scripts/ood/oe/imagenet200_test_oe.sh b/OpenOOD/scripts/ood/oe/imagenet200_test_oe.sh new file mode 100644 index 0000000000000000000000000000000000000000..6121832c67c8389f45483b57a421a9349dcec0d1 --- /dev/null +++ b/OpenOOD/scripts/ood/oe/imagenet200_test_oe.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/oe/imagenet200_test_oe.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_oe_resnet18_224x224_oe_e90_lr0.1_lam0.5_default \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_oe_resnet18_224x224_oe_e90_lr0.1_lam0.5_default \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/oe/imagnet200_train_oe.sh b/OpenOOD/scripts/ood/oe/imagnet200_train_oe.sh new file mode 100644 index 0000000000000000000000000000000000000000..98546f385ddd5adc7850343bc5f9369f8485526b --- /dev/null +++ b/OpenOOD/scripts/ood/oe/imagnet200_train_oe.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# sh scripts/ood/oe/imagenet200_train_oe.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/datasets/imagenet200/imagenet200_oe.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_oe.yml \ + configs/preprocessors/base_preprocessor.yml \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/rankfeat/cifar100_test_ood_rankfeat.sh b/OpenOOD/scripts/ood/rankfeat/cifar100_test_ood_rankfeat.sh new file mode 100644 index 
0000000000000000000000000000000000000000..b649528dab7f26baedd4ef642f89a1aa918defc3 --- /dev/null +++ b/OpenOOD/scripts/ood/rankfeat/cifar100_test_ood_rankfeat.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# sh scripts/ood/rankfeat/cifar100_test_ood_rankfeat.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/rankfeat.yml \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor rankfeat \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/rankfeat/cifar10_test_ood_rankfeat.sh b/OpenOOD/scripts/ood/rankfeat/cifar10_test_ood_rankfeat.sh new file mode 100644 index 0000000000000000000000000000000000000000..1f187c2f85e5e74bf4855b6d79182639efc0bf3b --- /dev/null +++ b/OpenOOD/scripts/ood/rankfeat/cifar10_test_ood_rankfeat.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/rankfeat/cifar10_test_ood_rankfeat.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/rankfeat.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 1 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor rankfeat \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/rankfeat/imagenet200_test_ood_rankfeat.sh b/OpenOOD/scripts/ood/rankfeat/imagenet200_test_ood_rankfeat.sh new file mode 100644 index 0000000000000000000000000000000000000000..21dcb4748cf98e7bd104aca2954623c75ad154f3 --- /dev/null +++ b/OpenOOD/scripts/ood/rankfeat/imagenet200_test_ood_rankfeat.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/rankfeat/imagenet200_test_ood_rankfeat.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root 
./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor rankfeat \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor rankfeat \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/rankfeat/imagenet_test_ood_rankfeat.sh b/OpenOOD/scripts/ood/rankfeat/imagenet_test_ood_rankfeat.sh new file mode 100644 index 0000000000000000000000000000000000000000..9c946d72f0b013da88080a68bd3453247bc5f959 --- /dev/null +++ b/OpenOOD/scripts/ood/rankfeat/imagenet_test_ood_rankfeat.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/rankfeat/imagenet_test_ood_rankfeat.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/rankfeat.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor rankfeat \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor rankfeat \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/react/cifar100_test_ood_react.sh b/OpenOOD/scripts/ood/react/cifar100_test_ood_react.sh new file mode 100644 index 0000000000000000000000000000000000000000..f9ca78347fd0406e5e1e58b8f8e47c358722d73f --- /dev/null +++ b/OpenOOD/scripts/ood/react/cifar100_test_ood_react.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# sh scripts/ood/react/cifar100_test_ood_react.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/react_net.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/react.yml \ + --network.pretrained False \ + --network.backbone.name resnet18_32x32 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --num_workers 8 \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root 
./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor react \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/react/cifar10_test_ood_react.sh b/OpenOOD/scripts/ood/react/cifar10_test_ood_react.sh new file mode 100644 index 0000000000000000000000000000000000000000..43d087f02f4e4c8b407dc58a57d56262d8d57c6e --- /dev/null +++ b/OpenOOD/scripts/ood/react/cifar10_test_ood_react.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# sh scripts/ood/react/cifar10_test_ood_react.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/react_net.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/react.yml \ + --network.pretrained False \ + --network.backbone.name resnet18_32x32 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --num_workers 8 \ + --mark fixed_0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor react \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/react/imagenet200_test_ood_react.sh b/OpenOOD/scripts/ood/react/imagenet200_test_ood_react.sh new file mode 100644 index 0000000000000000000000000000000000000000..430dfa9051110213e89e3259ef6297af3f9b52c4 --- /dev/null +++ b/OpenOOD/scripts/ood/react/imagenet200_test_ood_react.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/react/imagenet200_test_ood_react.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor react \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor react \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/react/imagenet_test_ood_react.sh b/OpenOOD/scripts/ood/react/imagenet_test_ood_react.sh new file mode 100644 index 0000000000000000000000000000000000000000..3eff8066c7366f3562dc9230057651eaf5af7213 --- /dev/null +++ b/OpenOOD/scripts/ood/react/imagenet_test_ood_react.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/react/imagenet_test_ood_react.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/react_net.yml \ + configs/pipelines/test/test_ood.yml 
\ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/react.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained False \ + --network.backbone.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor react \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor react \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/react/mnist_test_ood_react.sh b/OpenOOD/scripts/ood/react/mnist_test_ood_react.sh new file mode 100644 index 0000000000000000000000000000000000000000..ee83af75a945f105d537dbffcaed8cd3d3001a6a --- /dev/null +++ b/OpenOOD/scripts/ood/react/mnist_test_ood_react.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ood/react/mnist_test_ood_react.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/react_net.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/react.yml \ +--network.pretrained False \ +--network.backbone.name lenet \ +--network.backbone.pretrained True \ +--network.backbone.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--num_workers 8 \ +--mark 0 diff --git a/OpenOOD/scripts/ood/react/mnist_test_osr_react.sh b/OpenOOD/scripts/ood/react/mnist_test_osr_react.sh new file mode 100644 index 0000000000000000000000000000000000000000..c0539aad19401a29bcb016033f6cf6df885c03ed --- /dev/null +++ b/OpenOOD/scripts/ood/react/mnist_test_osr_react.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ood/react/mnist_test_osr_react.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/react_net.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/react.yml \ +--network.pretrained False \ +--network.backbone.name lenet \ +--network.backbone.pretrained True \ +--network.backbone.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--num_workers 8 \ +--mark 0 diff --git a/OpenOOD/scripts/ood/react/sweep_osr.py b/OpenOOD/scripts/ood/react/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..b031594853ed1a728a92d548a9af1bd270cc836d --- /dev/null +++ b/OpenOOD/scripts/ood/react/sweep_osr.py @@ -0,0 +1,40 @@ +# python scripts/ood/react/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 
'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/react_net.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/react.yml \ + --network.pretrained False \ + --network.backbone.name {network} \ + --network.backbone.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/relation/cifar100_test_ood_relation.sh b/OpenOOD/scripts/ood/relation/cifar100_test_ood_relation.sh new file mode 100644 index 0000000000000000000000000000000000000000..eb698d3d9348e556a121d195c373dc71cf5a2a8d --- /dev/null +++ b/OpenOOD/scripts/ood/relation/cifar100_test_ood_relation.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/relation/cifar100_test_ood_relation.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/relation.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor relation \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/relation/cifar10_test_ood_relation.sh b/OpenOOD/scripts/ood/relation/cifar10_test_ood_relation.sh new file mode 100644 index 0000000000000000000000000000000000000000..c5613c22fee4e3448fd5a5d800787a5cd7f4dc32 --- /dev/null +++ b/OpenOOD/scripts/ood/relation/cifar10_test_ood_relation.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/relation/cifar10_test_ood_relation.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + 
configs/postprocessors/relation.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor relation \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/relation/imagenet200_test_ood_relation.sh b/OpenOOD/scripts/ood/relation/imagenet200_test_ood_relation.sh new file mode 100644 index 0000000000000000000000000000000000000000..dc241648e9c04fe5b9abd51b2bb9718b232ed237 --- /dev/null +++ b/OpenOOD/scripts/ood/relation/imagenet200_test_ood_relation.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/relation/imagenet200_test_ood_relation.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor relation \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor relation \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/relation/imagenet_test_ood_relation.sh b/OpenOOD/scripts/ood/relation/imagenet_test_ood_relation.sh new file mode 100644 index 0000000000000000000000000000000000000000..824d58763369aac5f4bf20c3d939af46d8bb669f --- /dev/null +++ b/OpenOOD/scripts/ood/relation/imagenet_test_ood_relation.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/relation/imagenet_test_ood_relation.sh + +GPU=1 +CPU=1 +node=37 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/relation.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor relation \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor relation \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/relation/mnist_test_ood_relation.sh b/OpenOOD/scripts/ood/relation/mnist_test_ood_relation.sh new file mode 100644 index
0000000000000000000000000000000000000000..965e040b0e7d70df752e043edf99c873fbfb4d88 --- /dev/null +++ b/OpenOOD/scripts/ood/relation/mnist_test_ood_relation.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/relation/mnist_test_ood_relation.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/relation.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/relation/mnist_test_osr_relation.sh b/OpenOOD/scripts/ood/relation/mnist_test_osr_relation.sh new file mode 100644 index 0000000000000000000000000000000000000000..3b0e64716af1331a80480c1ba92ff5c6a4972de4 --- /dev/null +++ b/OpenOOD/scripts/ood/relation/mnist_test_osr_relation.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/relation/mnist_test_osr_relation.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/relation.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/relation/sweep_osr.py b/OpenOOD/scripts/ood/relation/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..94ec1d8d592be600e30fdd0ac729cf66f9a94177 --- /dev/null +++ b/OpenOOD/scripts/ood/relation/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/ood/relation/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', 'resnet18_32x32', + 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', 'resnet18_32x32', + 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', 'resnet18_64x64', + 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', 'lenet', + 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/relation.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/residual/cifar100_test_ood_residual.sh 
b/OpenOOD/scripts/ood/residual/cifar100_test_ood_residual.sh new file mode 100644 index 0000000000000000000000000000000000000000..c0a4291ac05f1b9e9ab276dc3a0835bb4a1d926b --- /dev/null +++ b/OpenOOD/scripts/ood/residual/cifar100_test_ood_residual.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/residual/cifar100_test_ood_residual.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/residual.yml \ +--num_workers 8 \ +--network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1/best.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/residual/cifar10_test_ood_residual.sh b/OpenOOD/scripts/ood/residual/cifar10_test_ood_residual.sh new file mode 100644 index 0000000000000000000000000000000000000000..38b795bc00722f7d58cb25c3d2273a5167e8dc91 --- /dev/null +++ b/OpenOOD/scripts/ood/residual/cifar10_test_ood_residual.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/residual/cifar10_test_ood_residual.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/residual.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/residual/imagenet_test_ood_residual.sh b/OpenOOD/scripts/ood/residual/imagenet_test_ood_residual.sh new file mode 100644 index 0000000000000000000000000000000000000000..a50ed00818727cf82050746ddd441152e81d3dc5 --- /dev/null +++ b/OpenOOD/scripts/ood/residual/imagenet_test_ood_residual.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/ood/residual/imagenet_test_ood_residual.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/imagenet/imagenet.yml \ +configs/datasets/imagenet/imagenet_ood.yml \ +configs/networks/vit.yml \ +configs/pipelines/test/test_ood.yml \ +configs/postprocessors/residual.yml \ +--num_workers 8 \ +--network.checkpoint ./checkpoints/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth \ +--mark 0 diff --git a/OpenOOD/scripts/ood/rmds/cifar100_test_ood_rmds.sh b/OpenOOD/scripts/ood/rmds/cifar100_test_ood_rmds.sh new file mode 100644 index 0000000000000000000000000000000000000000..19eefed1a5764cb9452dfa3cd771a376f49b30b6 --- /dev/null +++ b/OpenOOD/scripts/ood/rmds/cifar100_test_ood_rmds.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# sh scripts/ood/rmds/cifar100_test_ood_rmds.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} 
--ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/rmds.yml \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor rmds \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/rmds/cifar10_test_ood_rmds.sh b/OpenOOD/scripts/ood/rmds/cifar10_test_ood_rmds.sh new file mode 100644 index 0000000000000000000000000000000000000000..995e26cae239cba254351cf9991e9525fea23993 --- /dev/null +++ b/OpenOOD/scripts/ood/rmds/cifar10_test_ood_rmds.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/rmds/cifar10_test_ood_rmds.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/rmds.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 1 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor rmds \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/rmds/imagenet200_test_ood_rmds.sh b/OpenOOD/scripts/ood/rmds/imagenet200_test_ood_rmds.sh new file mode 100644 index 0000000000000000000000000000000000000000..0aec0265886be7b723b02ef8d019154003295ac0 --- /dev/null +++ b/OpenOOD/scripts/ood/rmds/imagenet200_test_ood_rmds.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/rmds/imagenet200_test_ood_rmds.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor rmds \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor rmds \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/rmds/imagenet_test_ood_rmds.sh b/OpenOOD/scripts/ood/rmds/imagenet_test_ood_rmds.sh new file mode 100644 index 
0000000000000000000000000000000000000000..857cacc63c17481930394c1d3683d31980d1dbd0 --- /dev/null +++ b/OpenOOD/scripts/ood/rmds/imagenet_test_ood_rmds.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/rmds/imagenet_test_ood_rmds.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/rmds.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor rmds \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor rmds \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/rotpred/cifar100_test_rotpred.sh b/OpenOOD/scripts/ood/rotpred/cifar100_test_rotpred.sh new file mode 100644 index 0000000000000000000000000000000000000000..b7faa66e9820382a0dc15f172aa643e4927c8540 --- /dev/null +++ b/OpenOOD/scripts/ood/rotpred/cifar100_test_rotpred.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/rotpred/cifar100_test_rotpred.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_rot_net_rotpred_e100_lr0.1_default \ + --postprocessor rotpred \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/rotpred/cifar100_train_rotpred.sh b/OpenOOD/scripts/ood/rotpred/cifar100_train_rotpred.sh new file mode 100644 index 0000000000000000000000000000000000000000..4f0dcf998096fbb888f0a232e90019fa30fcea36 --- /dev/null +++ b/OpenOOD/scripts/ood/rotpred/cifar100_train_rotpred.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# sh scripts/ood/rotpred/cifar100_train_rotpred.sh + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/rot_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + --trainer.name rotpred \ + --seed 0 diff --git a/OpenOOD/scripts/ood/rotpred/cifar10_test_rotpred.sh b/OpenOOD/scripts/ood/rotpred/cifar10_test_rotpred.sh new file mode 100644 index 0000000000000000000000000000000000000000..f5cb503743c9442571598f1f3b5371a8e4cd4182 --- /dev/null +++ b/OpenOOD/scripts/ood/rotpred/cifar10_test_rotpred.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/rotpred/cifar10_test_rotpred.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get 
results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_rot_net_rotpred_e100_lr0.1_default \ + --postprocessor rotpred \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/rotpred/cifar10_train_rotpred.sh b/OpenOOD/scripts/ood/rotpred/cifar10_train_rotpred.sh new file mode 100644 index 0000000000000000000000000000000000000000..af340c526960748bf6f49d8f367ec91c421a7b89 --- /dev/null +++ b/OpenOOD/scripts/ood/rotpred/cifar10_train_rotpred.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# sh scripts/ood/rotpred/cifar10_train_rotpred.sh + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/rot_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + --trainer.name rotpred \ + --seed 0 diff --git a/OpenOOD/scripts/ood/rotpred/imagenet200_test_rotpred.sh b/OpenOOD/scripts/ood/rotpred/imagenet200_test_rotpred.sh new file mode 100644 index 0000000000000000000000000000000000000000..e7c20f782285fea1788547ba7127c9ee7a0e2e4b --- /dev/null +++ b/OpenOOD/scripts/ood/rotpred/imagenet200_test_rotpred.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/rotpred/imagenet200_test_rotpred.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_rot_net_rotpred_e90_lr0.1_default \ + --postprocessor rotpred \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_rot_net_rotpred_e90_lr0.1_default \ + --postprocessor rotpred \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/rotpred/imagenet200_train_rotpred.sh b/OpenOOD/scripts/ood/rotpred/imagenet200_train_rotpred.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f49121833ff32c545e62d6db801238702b501ec --- /dev/null +++ b/OpenOOD/scripts/ood/rotpred/imagenet200_train_rotpred.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# sh scripts/ood/rotpred/imagenet200_train_rotpred.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/rot_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.backbone.name resnet18_224x224 \ + --trainer.name rotpred \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/rotpred/imagenet_test_rotpred.sh b/OpenOOD/scripts/ood/rotpred/imagenet_test_rotpred.sh new file mode 100644 index 0000000000000000000000000000000000000000..4159e58f121126ffa6081c9b9660603dbdb75a65 --- /dev/null +++ b/OpenOOD/scripts/ood/rotpred/imagenet_test_rotpred.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/ood/rotpred/imagenet_test_rotpred.sh + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_rot_net_rotpred_e30_lr0.001_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor rotpred \ + --save-score --save-csv #--fsood + + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path 
./results/imagenet_rot_net_rotpred_e30_lr0.001_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor rotpred \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/rotpred/imagenet_train_rotpred.sh b/OpenOOD/scripts/ood/rotpred/imagenet_train_rotpred.sh new file mode 100644 index 0000000000000000000000000000000000000000..69f45cba91b253d97352731665c4e90be360389f --- /dev/null +++ b/OpenOOD/scripts/ood/rotpred/imagenet_train_rotpred.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ood/rotpred/imagenet_train_rotpred.sh + +# the batch size is 64; otherwise training runs out of GPU memory +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/rot_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.backbone.name resnet50 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --trainer.name rotpred \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 64 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/scale/cifar100_test_ood_scale.sh b/OpenOOD/scripts/ood/scale/cifar100_test_ood_scale.sh new file mode 100644 index 0000000000000000000000000000000000000000..2aa3ef3fbb99fd91f5a951f282b4fe3407fcdf64 --- /dev/null +++ b/OpenOOD/scripts/ood/scale/cifar100_test_ood_scale.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# sh scripts/ood/scale/cifar100_test_ood_scale.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/scale.yml \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor scale \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/scale/cifar10_test_ood_scale.sh b/OpenOOD/scripts/ood/scale/cifar10_test_ood_scale.sh new file mode 100644 index 0000000000000000000000000000000000000000..066d09da573a7e7859b5c067ff48714ffb779e2b --- /dev/null +++ b/OpenOOD/scripts/ood/scale/cifar10_test_ood_scale.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/scale/cifar10_test_ood_scale.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/scale.yml \ + --num_workers 8 \ + --network.checkpoint 
'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 1 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor scale \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/scale/imagenet200_test_ood_scale.sh b/OpenOOD/scripts/ood/scale/imagenet200_test_ood_scale.sh new file mode 100644 index 0000000000000000000000000000000000000000..a0e78c7abc170af0d765d0a80fde5e1a0f0db3d6 --- /dev/null +++ b/OpenOOD/scripts/ood/scale/imagenet200_test_ood_scale.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/scale/imagenet200_test_ood_scale.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor scale \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor scale \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/scale/imagenet_test_ood_scale.sh b/OpenOOD/scripts/ood/scale/imagenet_test_ood_scale.sh new file mode 100644 index 0000000000000000000000000000000000000000..4a9f7621b88e64b21fe45ad02a0cc6d0c270f491 --- /dev/null +++ b/OpenOOD/scripts/ood/scale/imagenet_test_ood_scale.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/scale/imagenet_test_ood_scale.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/scale.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor scale \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor scale \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/sem/cifar100_test_ood_sem.sh b/OpenOOD/scripts/ood/sem/cifar100_test_ood_sem.sh new file mode 100644 index 0000000000000000000000000000000000000000..787b3d8a859895b0f8c3a90065f2aa37a03b8697 --- /dev/null +++ b/OpenOOD/scripts/ood/sem/cifar100_test_ood_sem.sh @@ -0,0 +1,22 @@ 
+#!/bin/bash +# sh scripts/ood/sem/cifar100_test_ood_sem.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/gmm.yml \ +--num_workers 8 \ +--network.checkpoint 'results/cifar100_resnet18_32x32_sae_e100_lr0.05/best.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/ood/sem/cifar100_train_sem.sh b/OpenOOD/scripts/ood/sem/cifar100_train_sem.sh new file mode 100644 index 0000000000000000000000000000000000000000..9165131428aebb21371ac085698e16171bf58d1f --- /dev/null +++ b/OpenOOD/scripts/ood/sem/cifar100_train_sem.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/sem/cifar100_train_sem.sh + +#GPU=1 +#CPU=1 +#node=79 +#jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} \ +#-w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/pipelines/train/train_sem.yml \ +--optimizer.num_epochs 100 \ +--network.pretrained False \ +--network.checkpoint ./results/mnist_0408_3/mnist_lenet_base_e100_lr0.1/best_epoch77_acc0.9940.ckpt \ +--num_workers 8 \ No newline at end of file diff --git a/OpenOOD/scripts/ood/sem/cifar10_test_ood_sem.sh b/OpenOOD/scripts/ood/sem/cifar10_test_ood_sem.sh new file mode 100644 index 0000000000000000000000000000000000000000..d7018e83fbd02100e2c73669cc9e50d2facbab21 --- /dev/null +++ b/OpenOOD/scripts/ood/sem/cifar10_test_ood_sem.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ood/sem/cifar10_test_ood_sem.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/gmm.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt' \ +--mark no_train diff --git a/OpenOOD/scripts/ood/sem/cifar10_train_sem.sh b/OpenOOD/scripts/ood/sem/cifar10_train_sem.sh new file mode 100644 index 0000000000000000000000000000000000000000..5c6ed8c3456b2bc49eb187b1b6057f9208222ebf --- /dev/null +++ b/OpenOOD/scripts/ood/sem/cifar10_train_sem.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ood/sem/cifar10_train_sem.sh + +# GPU=1 +# CPU=1 +# node=79 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/resnet18_32x32.yml \ 
+configs/pipelines/train/train_sem.yml \ +configs/preprocessors/base_preprocessor.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt' diff --git a/OpenOOD/scripts/ood/sem/imagenet_test_ood_sem.sh b/OpenOOD/scripts/ood/sem/imagenet_test_ood_sem.sh new file mode 100644 index 0000000000000000000000000000000000000000..5e381fab9331bca793491869c846b5da80f1bd5d --- /dev/null +++ b/OpenOOD/scripts/ood/sem/imagenet_test_ood_sem.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/ood/sem/imagenet_test_ood_sem.sh + +GPU=1 +CPU=1 +node=76 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/imagenet/imagenet.yml \ +configs/datasets/imagenet/imagenet_ood.yml \ +configs/networks/resnet50.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/gmm.yml \ +--num_workers 4 \ +--ood_dataset.image_size 256 \ +--dataset.test.batch_size 256 \ +--dataset.val.batch_size 256 \ +--network.pretrained True \ +--network.checkpoint 'results/checkpoints/imagenet_res50_acc76.10.pth' \ +--merge_option merge diff --git a/OpenOOD/scripts/ood/sem/mnist_test_ood_sem.sh b/OpenOOD/scripts/ood/sem/mnist_test_ood_sem.sh new file mode 100644 index 0000000000000000000000000000000000000000..77bd12723354594400e0f63b7c1e9de864708aa9 --- /dev/null +++ b/OpenOOD/scripts/ood/sem/mnist_test_ood_sem.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/ood/sem/mnist_test_ood_sem.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/gmm.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark no_train diff --git a/OpenOOD/scripts/ood/sem/sweep_osr.py b/OpenOOD/scripts/ood/sem/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..d929b88a527fa297702b89fc76f165efbef505b0 --- /dev/null +++ b/OpenOOD/scripts/ood/sem/sweep_osr.py @@ -0,0 +1,30 @@ +# python scripts/ood/sem/sweep_osr.py +import os + +config = [ + # ['osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt'], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + # ['osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt'], + # ['osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt'], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + 
configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/gmm.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/she/cifar100_test_ood_she.sh b/OpenOOD/scripts/ood/she/cifar100_test_ood_she.sh new file mode 100644 index 0000000000000000000000000000000000000000..b974b3ab88ec6785e57571ee50a73e0f9549600a --- /dev/null +++ b/OpenOOD/scripts/ood/she/cifar100_test_ood_she.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# sh scripts/ood/she/cifar100_test_ood_she.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/she.yml \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor she \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/she/cifar10_test_ood_she.sh b/OpenOOD/scripts/ood/she/cifar10_test_ood_she.sh new file mode 100644 index 0000000000000000000000000000000000000000..db2715ba03f087bb61209a911233e92a15faa66e --- /dev/null +++ b/OpenOOD/scripts/ood/she/cifar10_test_ood_she.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/ood/she/cifar10_test_ood_she.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/she.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 1 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor she \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/she/imagenet200_test_ood_she.sh b/OpenOOD/scripts/ood/she/imagenet200_test_ood_she.sh new file mode 100644 index 0000000000000000000000000000000000000000..df0998b6814604fd1fb4726c467bf3ae3b623b9a --- /dev/null +++ b/OpenOOD/scripts/ood/she/imagenet200_test_ood_she.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/she/imagenet200_test_ood_she.sh + +############################################ +# alternatively, we 
recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor she \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor she \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/she/imagenet_test_ood_she.sh b/OpenOOD/scripts/ood/she/imagenet_test_ood_she.sh new file mode 100644 index 0000000000000000000000000000000000000000..860f2fc8804dfd801dea12ea22ddce038640608f --- /dev/null +++ b/OpenOOD/scripts/ood/she/imagenet_test_ood_she.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/ood/she/imagenet_test_ood_she.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/she.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor she \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor she \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/ssd/cifar_10_test_ood_ssd.sh b/OpenOOD/scripts/ood/ssd/cifar_10_test_ood_ssd.sh new file mode 100644 index 0000000000000000000000000000000000000000..9bfcbc4aae7330e481bc6853e1364aac06c951ab --- /dev/null +++ b/OpenOOD/scripts/ood/ssd/cifar_10_test_ood_ssd.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/ood/ssd/cifar_10_test_ood_ssd.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/simclr.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/mds.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/ssd/last.pth' \ +--mark 0 \ +--merge_option merge diff --git a/OpenOOD/scripts/ood/t2fnorm/test.sh b/OpenOOD/scripts/ood/t2fnorm/test.sh new file mode 100644 index 0000000000000000000000000000000000000000..2ce6df9462bd8f68cf38e742df82548196030fad --- /dev/null +++ b/OpenOOD/scripts/ood/t2fnorm/test.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# sh 
scripts/ood/t2fnorm/test.sh +idx=-1 +datasets=("cifar10" "cifar100") +pids=() +counter=0 +for dataset in "${datasets[@]}" +do + networks=("resnet18_32x32") + for network in "${networks[@]}" + do + folders=( $(find ./results/t2fnorm/models/ -maxdepth 1 -type d -name "*${dataset}_${network}*") ) + if [ ${#folders[@]} -ne 5 ]; then + echo "Expected 5 run folders for ${dataset}_${network}, found ${#folders[@]}" + #exit 1 + fi + for folder in "${folders[@]}" + do + postprocessors=("base" "ebo" "dice" "gradnorm" "odin") + for postproc in "${postprocessors[@]}" + do + idx=$(( (idx + 1) % 8 )) + CUDA_VISIBLE_DEVICES=$idx python main.py \ + --config configs/datasets/$dataset/$dataset.yml \ + configs/datasets/$dataset/${dataset}_ood.yml \ + configs/networks/$network.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/$postproc.yml \ + --num_workers 8 \ + --network.checkpoint "${folder}/best.ckpt" \ + --mark T2FNorm & + pids+=($!) + counter=$((counter+1)) + if [ $counter -eq 8 ]; then + for pid in "${pids[@]}"; do + wait $pid + done + pids=() + counter=0 + fi + done + done + done +done + +for pid in "${pids[@]}"; do + wait $pid +done diff --git a/OpenOOD/scripts/ood/t2fnorm/train.sh b/OpenOOD/scripts/ood/t2fnorm/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..dd028bd30eafa56dc33e68e2a3194ddfb4228148 --- /dev/null +++ b/OpenOOD/scripts/ood/t2fnorm/train.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# sh scripts/ood/t2fnorm/train.sh +idx=-1 +for j in {1..5} +do + datasets=("cifar10" "cifar100") + for dataset in "${datasets[@]}" + do + networks=("resnet18_32x32") + for network in "${networks[@]}" + do + idx=$(( (idx + 1) % 4 )) + CUDA_VISIBLE_DEVICES=$idx python main.py \ + --config configs/datasets/$dataset/$dataset.yml \ + configs/datasets/$dataset/${dataset}_ood.yml \ + configs/networks/$network.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/train/train_t2fnorm.yml \ + --network.pretrained False \ + --dataset.image_size 32 \ + --optimizer.num_epochs 100 \ + --num_workers 8 \ + --seed $RANDOM \ + --network.tau 0.1 & + done + done +done diff --git a/OpenOOD/scripts/ood/udg/cifar100_test_udg.sh b/OpenOOD/scripts/ood/udg/cifar100_test_udg.sh new file mode 100644 index 0000000000000000000000000000000000000000..d498c107f35c2efa271b567db03431a8f8df145d --- /dev/null +++ b/OpenOOD/scripts/ood/udg/cifar100_test_udg.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# sh scripts/ood/udg/cifar100_test_udg.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/udg_net.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_oe_udg_udg_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_oe_udg_udg_e100_lr0.1_default \ + --postprocessor msp \ + --save-score 
--save-csv diff --git a/OpenOOD/scripts/ood/udg/cifar100_train_udg.sh b/OpenOOD/scripts/ood/udg/cifar100_train_udg.sh new file mode 100644 index 0000000000000000000000000000000000000000..44d8bb2f6a55785f7dad45328ac6fe5a838ffd20 --- /dev/null +++ b/OpenOOD/scripts/ood/udg/cifar100_train_udg.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/ood/udg/cifar100_train_udg.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +-w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_oe.yml \ + configs/networks/udg_net.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_udg.yml \ + --dataset.train.dataset_class UDGDataset \ + --dataset.oe.dataset_class UDGDataset \ + --network.backbone.name resnet18_32x32 \ + --network.pretrained False \ + --seed 0 diff --git a/OpenOOD/scripts/ood/udg/cifar10_test_udg.sh b/OpenOOD/scripts/ood/udg/cifar10_test_udg.sh new file mode 100644 index 0000000000000000000000000000000000000000..b5faebd19a957d9c6bdf6192381489d32aad9f61 --- /dev/null +++ b/OpenOOD/scripts/ood/udg/cifar10_test_udg.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# sh scripts/ood/udg/cifar10_test_udg.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/udg_net.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_oe_udg_udg_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_oe_udg_udg_e100_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/udg/cifar10_train_udg.sh b/OpenOOD/scripts/ood/udg/cifar10_train_udg.sh new file mode 100644 index 0000000000000000000000000000000000000000..de7956fa63b8c0ab1fe1c5c063301c8a1d784baf --- /dev/null +++ b/OpenOOD/scripts/ood/udg/cifar10_train_udg.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/ood/udg/cifar10_train_udg.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_oe.yml \ + configs/networks/udg_net.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_udg.yml \ + --dataset.train.dataset_class UDGDataset \ + --dataset.oe.dataset_class UDGDataset \ + --network.backbone.name resnet18_32x32 \ + --network.pretrained False \ + --seed 0 diff --git 
a/OpenOOD/scripts/ood/udg/imagenet200_test_udg.sh b/OpenOOD/scripts/ood/udg/imagenet200_test_udg.sh new file mode 100644 index 0000000000000000000000000000000000000000..ea1e57bbeba993d95981c628d4d322e99ea56dd9 --- /dev/null +++ b/OpenOOD/scripts/ood/udg/imagenet200_test_udg.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/udg/imagenet200_test_udg.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_oe_udg_udg_e90_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_oe_udg_udg_e90_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/udg/imagenet200_train_udg.sh b/OpenOOD/scripts/ood/udg/imagenet200_train_udg.sh new file mode 100644 index 0000000000000000000000000000000000000000..a34676717842885528436b964a35041f38edcd63 --- /dev/null +++ b/OpenOOD/scripts/ood/udg/imagenet200_train_udg.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/ood/udg/imagenet200_train_udg.sh + +# UDG trainer cannot work with multiple GPUs currently +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/datasets/imagenet200/imagenet200_oe.yml \ + configs/networks/udg_net.yml \ + configs/pipelines/train/baseline.yml \ + configs/pipelines/train/train_udg.yml \ + configs/preprocessors/base_preprocessor.yml \ + --dataset.train.dataset_class UDGDataset \ + --dataset.oe.dataset_class UDGDataset \ + --network.backbone.name resnet18_224x224 \ + --network.pretrained False \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 256 \ + --dataset.oe.batch_size 512 \ + --num_gpus 1 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/vim/cifar100_test_ood_vim.sh b/OpenOOD/scripts/ood/vim/cifar100_test_ood_vim.sh new file mode 100644 index 0000000000000000000000000000000000000000..a298823a8a87ad807707fc261d8481ec45565299 --- /dev/null +++ b/OpenOOD/scripts/ood/vim/cifar100_test_ood_vim.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# sh scripts/ood/vim/cifar100_test_ood_vim.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/vim.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 \ + --postprocessor.postprocessor_args.dim 256 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor vim \ + --save-score --save-csv diff --git 
a/OpenOOD/scripts/ood/vim/cifar10_test_ood_vim.sh b/OpenOOD/scripts/ood/vim/cifar10_test_ood_vim.sh new file mode 100644 index 0000000000000000000000000000000000000000..986747b2c5c76a04b518a3fb1d318069172de5c8 --- /dev/null +++ b/OpenOOD/scripts/ood/vim/cifar10_test_ood_vim.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# sh scripts/ood/vim/cifar10_test_ood_vim.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/vim.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 \ + --postprocessor.postprocessor_args.dim 256 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor vim \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/vim/imagenet200_test_ood_vim.sh b/OpenOOD/scripts/ood/vim/imagenet200_test_ood_vim.sh new file mode 100644 index 0000000000000000000000000000000000000000..0d0e3d3e681fbff55e6353d49176dafc1a7ea738 --- /dev/null +++ b/OpenOOD/scripts/ood/vim/imagenet200_test_ood_vim.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/vim/imagenet200_test_ood_vim.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor vim \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor vim \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/vim/imagenet_test_ood_vim.sh b/OpenOOD/scripts/ood/vim/imagenet_test_ood_vim.sh new file mode 100644 index 0000000000000000000000000000000000000000..71188955d8e28c7aed453822f06936cc92c3629f --- /dev/null +++ b/OpenOOD/scripts/ood/vim/imagenet_test_ood_vim.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# sh scripts/ood/vim/imagenet_test_ood_vim.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/vim.yml \ + --num_workers 4 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 
'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --postprocessor.postprocessor_args.dim 1000 \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor vim \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor vim \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/vim/mnist_test_osr_vim.sh b/OpenOOD/scripts/ood/vim/mnist_test_osr_vim.sh new file mode 100644 index 0000000000000000000000000000000000000000..7246170dacb0cec6b5a516cacd755b92e03e897b --- /dev/null +++ b/OpenOOD/scripts/ood/vim/mnist_test_osr_vim.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/ood/vim/mnist_test_osr_vim.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/vim.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 \ +--postprocessor.postprocessor_args.dim 42 diff --git a/OpenOOD/scripts/ood/vim/sweep_osr.py b/OpenOOD/scripts/ood/vim/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..a528073100729727c15f0c8425d7b10826bb209f --- /dev/null +++ b/OpenOOD/scripts/ood/vim/sweep_osr.py @@ -0,0 +1,40 @@ +# python scripts/ood/vim/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/vim.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --postprocessor.postprocessor_args.dim 128 \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/ood/vos/cifar100_test_vos.sh b/OpenOOD/scripts/ood/vos/cifar100_test_vos.sh new file mode 100644 index 0000000000000000000000000000000000000000..e8622dbe0413d27618e6d0bdddb6c5f78106da74 --- /dev/null +++ b/OpenOOD/scripts/ood/vos/cifar100_test_vos.sh @@ -0,0 +1,30 @@ +#!/bin/bash 
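+# note (added clarification, not in the original script): VOS trains the
+# classifier with synthesized virtual outliers, but OOD scoring at test time
+# is energy-based, which is why this script pairs a vos-trained checkpoint
+# with configs/postprocessors/ebo.yml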
+# sh scripts/ood/vos/cifar100_test_vos.sh + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --num_workers 8 \ + --network.pretrained True \ + --network.checkpoint 'results/cifar100_resnet18_32x32_vos_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_vos_e100_lr0.1_default \ + --postprocessor ebo \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/vos/cifar100_train_vos.sh b/OpenOOD/scripts/ood/vos/cifar100_train_vos.sh new file mode 100644 index 0000000000000000000000000000000000000000..cccf2b3d1c0c30ba390ca1684334cc95b0589d3b --- /dev/null +++ b/OpenOOD/scripts/ood/vos/cifar100_train_vos.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/ood/vos/cifar100_train_vos.sh + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_vos.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --num_workers 8 \ + --optimizer.num_epochs 100 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/vos/cifar10_test_vos.sh b/OpenOOD/scripts/ood/vos/cifar10_test_vos.sh new file mode 100644 index 0000000000000000000000000000000000000000..9709a2e0424fd1f671e548802fdc150908059f05 --- /dev/null +++ b/OpenOOD/scripts/ood/vos/cifar10_test_vos.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# sh scripts/ood/vos/cifar10_test_vos.sh + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --num_workers 8 \ + --network.pretrained True \ + --network.checkpoint 'results/cifar10_resnet18_32x32_vos_e100_lr0.1_default/s0/best.ckpt' \ + --mark vos + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_vos_e100_lr0.1_default \ + --postprocessor ebo \ + --save-score --save-csv diff --git a/OpenOOD/scripts/ood/vos/cifar10_train_vos.sh b/OpenOOD/scripts/ood/vos/cifar10_train_vos.sh new file mode 100644 index 
0000000000000000000000000000000000000000..b81d62df507f1dfc356ecbfc6f8a3a2e4d3f8b98 --- /dev/null +++ b/OpenOOD/scripts/ood/vos/cifar10_train_vos.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/ood/vos/cifar10_train_vos.sh + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_vos.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --num_workers 8 \ + --optimizer.num_epochs 100 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/vos/imagenet200_test_vos.sh b/OpenOOD/scripts/ood/vos/imagenet200_test_vos.sh new file mode 100644 index 0000000000000000000000000000000000000000..66a06404049e0fb3d690f82dea3fdbfbf840ea31 --- /dev/null +++ b/OpenOOD/scripts/ood/vos/imagenet200_test_vos.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/vos/imagenet200_test_vos.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_vos_e90_lr0.1_default \ + --postprocessor ebo \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_vos_e90_lr0.1_default \ + --postprocessor ebo \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/ood/vos/imagenet200_train_vos.sh b/OpenOOD/scripts/ood/vos/imagenet200_train_vos.sh new file mode 100644 index 0000000000000000000000000000000000000000..36302b6e6530955d257d889fceff6113ba87fc2e --- /dev/null +++ b/OpenOOD/scripts/ood/vos/imagenet200_train_vos.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/ood/vos/imagenet200_train_vos.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/train_vos.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/ood/vos/imagenet_train_vos.sh b/OpenOOD/scripts/ood/vos/imagenet_train_vos.sh new file mode 100644 index 0000000000000000000000000000000000000000..6a96f6da77c61df4150f6d09c2b5b54a15155a4b --- /dev/null +++ b/OpenOOD/scripts/ood/vos/imagenet_train_vos.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/ood/vos/imagenet_train_vos.sh + +# we observed CUDA OOM errors on Quadro RTX 6000 24GB GPUs +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/train/train_vos.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/ebo.yml \ + --network.pretrained True \ + --network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --feature_dim 2048 \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed ${SEED:-0} diff --git a/OpenOOD/scripts/osr/arpl/2_arpl_test.sh b/OpenOOD/scripts/osr/arpl/2_arpl_test.sh new 
file mode 100644 index 0000000000000000000000000000000000000000..85c5486b42dfbdf265b6edb5cd8219207923042c --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/2_arpl_test.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/osr/arpl/2_arpl_test.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/digits/mnist.yml \ +configs/datasets/digits/mnist_ood.yml \ +configs/networks/arpl_net.yml \ +configs/pipelines/test/test_arpl.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--mark 0 diff --git a/OpenOOD/scripts/osr/arpl/2_arpl_train.sh b/OpenOOD/scripts/osr/arpl/2_arpl_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..f415a84ad391dc913afa9341373c9944bb1dadba --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/2_arpl_train.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/osr/arpl/2_arpl_train.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/digits/mnist.yml \ +configs/networks/arpl_net.yml \ +configs/pipelines/train/train_arpl.yml \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/osr/arpl/2_arplgan_test.sh b/OpenOOD/scripts/osr/arpl/2_arplgan_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..8e3d61a6395f0b232d7a8f0959da6da1b57ab40a --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/2_arplgan_test.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/osr/arpl/2_arplgan_test.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/digits/mnist.yml \ +configs/datasets/digits/mnist_ood.yml \ +configs/networks/arpl_gan.yml \ +configs/pipelines/test/test_arplgan.yml \ +configs/postprocessors/msp.yml \ +--dataset.image_size 32 \ +--num_workers 8 \ +--mark 0 diff --git a/OpenOOD/scripts/osr/arpl/2_arplgan_train.sh b/OpenOOD/scripts/osr/arpl/2_arplgan_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..c6c5e146a3244993bef0a5271e56a57ffba6f208 --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/2_arplgan_train.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/osr/arpl/2_arplgan_train.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/digits/mnist.yml \ +configs/networks/arpl_gan.yml \ +configs/pipelines/train/train_arpl_gan.yml \ +--dataset.image_size 32 \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/osr/arpl/cifar100_test_ood_arpl.sh b/OpenOOD/scripts/osr/arpl/cifar100_test_ood_arpl.sh new file mode 100644 index 0000000000000000000000000000000000000000..7385c16f5cc75b8b4c9db945acb0722176143938 --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/cifar100_test_ood_arpl.sh @@ 
-0,0 +1,25 @@ +#!/bin/bash +# sh scripts/osr/arpl/cifar100_test_ood_arpl.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +# this method needs to load multiple networks; set their checkpoints in the test pipeline config file, +# i.e., manually change the checkpoint path in configs/pipelines/test/test_arpl.yml +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/arpl_net.yml \ + configs/pipelines/test/test_arpl.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --network.feat_extract_network.name resnet18_32x32 \ + --num_workers 8 \ + --seed 0 diff --git a/OpenOOD/scripts/osr/arpl/cifar100_train_arpl.sh b/OpenOOD/scripts/osr/arpl/cifar100_train_arpl.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f831edd08fa4cb8053c2bb7b0380eec0cabad95 --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/cifar100_train_arpl.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/osr/arpl/cifar100_train_arpl.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/arpl_net.yml \ + configs/pipelines/train/train_arpl.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.feat_extract_network.name resnet18_32x32 \ + --num_workers 8 \ + --optimizer.num_epochs 100 \ + --seed 0 diff --git a/OpenOOD/scripts/osr/arpl/cifar10_test_ood_arpl.sh b/OpenOOD/scripts/osr/arpl/cifar10_test_ood_arpl.sh new file mode 100644 index 0000000000000000000000000000000000000000..b87c60babbc47a7a9c633eed1d93dd8a4424cb2b --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/cifar10_test_ood_arpl.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/osr/arpl/cifar10_test_ood_arpl.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +# this method needs to load multiple networks; set their checkpoints in the test pipeline config file, +# i.e., manually change the checkpoint path in configs/pipelines/test/test_arpl.yml +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/arpl_net.yml \ + configs/pipelines/test/test_arpl.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --network.feat_extract_network.name resnet18_32x32 \ + --num_workers 8 \ + --seed 0 diff --git a/OpenOOD/scripts/osr/arpl/cifar10_train_arpl.sh b/OpenOOD/scripts/osr/arpl/cifar10_train_arpl.sh new file mode 100644 index 0000000000000000000000000000000000000000..0dc10e006f7e30a2cc87b6510b1ed2fe6303834d --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/cifar10_train_arpl.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/osr/arpl/cifar10_train_arpl.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} 
--ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/arpl_net.yml \ + configs/pipelines/train/train_arpl.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.feat_extract_network.name resnet18_32x32 \ + --num_workers 8 \ + --optimizer.num_epochs 100 \ + --seed 0 diff --git a/OpenOOD/scripts/osr/arpl/imagenet200_test_ood_arpl.sh b/OpenOOD/scripts/osr/arpl/imagenet200_test_ood_arpl.sh new file mode 100644 index 0000000000000000000000000000000000000000..ed2e2ed4d5994d4ecd3773381dee230e7dc61005 --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/imagenet200_test_ood_arpl.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/osr/arpl/imagenet200_test_ood_arpl.sh + +# NOTE!!!! +# need to manually change the checkpoint path in configs/pipelines/test/test_arpl.yml +SCHEME="ood" # "ood" or "fsood" +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/datasets/imagenet200/imagenet200_${SCHEME}.yml \ + configs/networks/arpl_net.yml \ + configs/pipelines/test/test_arpl.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --network.feat_extract_network.name resnet18_224x224 \ + --num_workers 8 \ + --evaluator.ood_scheme ${SCHEME} \ + --seed 0 diff --git a/OpenOOD/scripts/osr/arpl/imagenet200_train_arpl.sh b/OpenOOD/scripts/osr/arpl/imagenet200_train_arpl.sh new file mode 100644 index 0000000000000000000000000000000000000000..c3414a84c0af6e62fc11f38e6b40f0efc679f428 --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/imagenet200_train_arpl.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/osr/arpl/imagenet200_train_arpl.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/arpl_net.yml \ + configs/pipelines/train/train_arpl.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.feat_extract_network.name resnet18_224x224 \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/osr/arpl/imagenet_test_ood_arpl.sh b/OpenOOD/scripts/osr/arpl/imagenet_test_ood_arpl.sh new file mode 100644 index 0000000000000000000000000000000000000000..c9239c0c44353105f22e95036e33efa436af8a3d --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/imagenet_test_ood_arpl.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/osr/arpl/imagenet_test_ood_arpl.sh + +# NOTE!!!! 
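+# a minimal sketch of the manual edit described in the next line, assuming the
+# weights sit under a checkpoint-style key in test_arpl.yml (verify the exact
+# key name in your local copy before editing):
+#   checkpoint: ./results/<your_imagenet_arpl_run>/s0/best.ckpt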
+# need to manually change the checkpoint path in configs/pipelines/test/test_arpl.yml +SCHEME="fsood" # "ood" or "fsood" +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_${SCHEME}.yml \ + configs/networks/arpl_net.yml \ + configs/pipelines/test/test_arpl.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --network.feat_extract_network.name resnet50 \ + --num_workers 8 \ + --evaluator.ood_scheme ${SCHEME} \ + --seed 0 diff --git a/OpenOOD/scripts/osr/arpl/imagenet_train_arpl.sh b/OpenOOD/scripts/osr/arpl/imagenet_train_arpl.sh new file mode 100644 index 0000000000000000000000000000000000000000..ddf49b44e6add68137086a820d20332eb1c7955a --- /dev/null +++ b/OpenOOD/scripts/osr/arpl/imagenet_train_arpl.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/osr/arpl/imagenet_train_arpl.sh + +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/arpl_net.yml \ + configs/pipelines/train/train_arpl.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.feat_extract_network.name resnet50 \ + --network.feat_extract_network.pretrained True \ + --network.feat_extract_network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/osr/opengan/cifar100_test_ood_opengan.sh b/OpenOOD/scripts/osr/opengan/cifar100_test_ood_opengan.sh new file mode 100644 index 0000000000000000000000000000000000000000..c73f1c2ed21a040d1d2e5a2fec2ba6666679df3e --- /dev/null +++ b/OpenOOD/scripts/osr/opengan/cifar100_test_ood_opengan.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/osr/opengan/cifar100_test_ood_opengan.sh + +# NOTE!!!! 
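+# added clarification (not in the original note): the backbone weights are
+# supplied on the command line via --network.backbone.checkpoint below, while
+# the OpenGAN discriminator weights are read from the pipeline yml, hence: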
+# need to manually change the network checkpoint path (not backbone) in configs/pipelines/test/test_opengan.yml +# corresponding to different runs +SEED=0 +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/opengan.yml \ + configs/pipelines/test/test_opengan.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/opengan.yml \ + --num_workers 8 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \ + --seed ${SEED} diff --git a/OpenOOD/scripts/osr/opengan/cifar100_train_opengan.sh b/OpenOOD/scripts/osr/opengan/cifar100_train_opengan.sh new file mode 100644 index 0000000000000000000000000000000000000000..14ba0e893679a0a85e3b253b7b495581efb541a4 --- /dev/null +++ b/OpenOOD/scripts/osr/opengan/cifar100_train_opengan.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/osr/opengan/cifar100_train_opengan.sh + +SEED=0 + +# feature extraction +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_opengan_feat_extract.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.checkpoint "./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt" \ + --seed ${SEED} + +# train +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/opengan.yml \ + configs/pipelines/train/train_opengan.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/opengan.yml \ + --dataset.feat_root ./results/cifar100_resnet18_32x32_feat_extract_opengan_default/s${SEED} \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \ + --seed ${SEED} diff --git a/OpenOOD/scripts/osr/opengan/cifar10_test_ood_opengan.sh b/OpenOOD/scripts/osr/opengan/cifar10_test_ood_opengan.sh new file mode 100644 index 0000000000000000000000000000000000000000..209de3229fad6dccfc2d29234b598eadefb61026 --- /dev/null +++ b/OpenOOD/scripts/osr/opengan/cifar10_test_ood_opengan.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# sh scripts/osr/opengan/cifar10_test_ood_opengan.sh + +# NOTE!!!! 
+# need to manually change the network checkpoint path (not backbone) in configs/pipelines/test/test_opengan.yml +# corresponding to different runs +SEED=0 +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/opengan.yml \ + configs/pipelines/test/test_opengan.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/opengan.yml \ + --num_workers 8 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \ + --seed ${SEED} diff --git a/OpenOOD/scripts/osr/opengan/cifar10_train_opengan.sh b/OpenOOD/scripts/osr/opengan/cifar10_train_opengan.sh new file mode 100644 index 0000000000000000000000000000000000000000..75b1d6894c2ebed099f46938248d76c080be3a45 --- /dev/null +++ b/OpenOOD/scripts/osr/opengan/cifar10_train_opengan.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/osr/opengan/cifar10_train_opengan.sh + +SEED=0 + +# feature extraction +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_opengan_feat_extract.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.checkpoint "./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt" \ + --seed ${SEED} + +# train +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/opengan.yml \ + configs/pipelines/train/train_opengan.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/opengan.yml \ + --dataset.feat_root ./results/cifar10_resnet18_32x32_feat_extract_opengan_default/s${SEED} \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \ + --seed ${SEED} diff --git a/OpenOOD/scripts/osr/opengan/feature_extract.sh b/OpenOOD/scripts/osr/opengan/feature_extract.sh new file mode 100644 index 0000000000000000000000000000000000000000..f93aadce842e4f7aa367bf379014dd9cc8b73827 --- /dev/null +++ b/OpenOOD/scripts/osr/opengan/feature_extract.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/osr/opengan/feature_extract.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/feat_extract.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.checkpoint "results/cifar100_resnet18_32x32_base_e100_lr0.1/best.ckpt" \ +--pipeline.extract_target train \ +--merge_option merge \ +--num_workers 8 \ +--mark 0 diff --git a/OpenOOD/scripts/osr/opengan/imagenet200_test_ood_opengan.sh b/OpenOOD/scripts/osr/opengan/imagenet200_test_ood_opengan.sh new file mode 100644 index 0000000000000000000000000000000000000000..7c353dda7cee01bca0b4a9b5fcb5bc4141314b8b --- /dev/null +++ b/OpenOOD/scripts/osr/opengan/imagenet200_test_ood_opengan.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/osr/opengan/imagenet200_test_ood_opengan.sh + +# NOTE!!!! 
+# need to manually change the network checkpoint path (not backbone) in configs/pipelines/test/test_opengan.yml +# corresponding to different runs +SEED=0 +SCHEME="ood" # "ood" or "fsood" +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/datasets/imagenet200/imagenet200_${SCHEME}.yml \ + configs/networks/opengan.yml \ + configs/pipelines/test/test_opengan.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/opengan.yml \ + --num_workers 8 \ + --network.backbone.name resnet18_224x224 \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt \ + --evaluator.ood_scheme ${SCHEME} \ + --seed ${SEED} diff --git a/OpenOOD/scripts/osr/opengan/imagenet200_train_opengan.sh b/OpenOOD/scripts/osr/opengan/imagenet200_train_opengan.sh new file mode 100644 index 0000000000000000000000000000000000000000..1a7861846e16182d3b6e2fc23712c0f30b32da26 --- /dev/null +++ b/OpenOOD/scripts/osr/opengan/imagenet200_train_opengan.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# sh scripts/osr/opengan/imagenet200_train_opengan.sh + +SEED=0 + +# feature extraction +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/datasets/imagenet200/imagenet200_ood.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/train_opengan_feat_extract.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.checkpoint "./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt" \ + --seed ${SEED} + +# train +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/opengan.yml \ + configs/pipelines/train/train_opengan.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/opengan.yml \ + --dataset.feat_root ./results/imagenet200_resnet18_224x224_feat_extract_opengan_default/s${SEED} \ + --network.backbone.pretrained True \ + --network.backbone.checkpoint ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt \ + --optimizer.num_epochs 90 \ + --seed ${SEED} diff --git a/OpenOOD/scripts/osr/openmax/cifar100_test_ood_openmax.sh b/OpenOOD/scripts/osr/openmax/cifar100_test_ood_openmax.sh new file mode 100644 index 0000000000000000000000000000000000000000..afc4c881497c1a254697986b1ce26ed6939197c9 --- /dev/null +++ b/OpenOOD/scripts/osr/openmax/cifar100_test_ood_openmax.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# sh scripts/osr/openmax/cifar100_test_ood_openmax.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/openmax.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor openmax \ + --save-score --save-csv diff --git a/OpenOOD/scripts/osr/openmax/cifar10_test_ood_openmax.sh 
b/OpenOOD/scripts/osr/openmax/cifar10_test_ood_openmax.sh new file mode 100644 index 0000000000000000000000000000000000000000..2a94ce0e426f37d4fb2f1ddce350b2fc6de144f2 --- /dev/null +++ b/OpenOOD/scripts/osr/openmax/cifar10_test_ood_openmax.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# sh scripts/osr/openmax/cifar10_test_ood_openmax.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/openmax.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor openmax \ + --save-score --save-csv diff --git a/OpenOOD/scripts/osr/openmax/imagenet200_test_ood_openmax.sh b/OpenOOD/scripts/osr/openmax/imagenet200_test_ood_openmax.sh new file mode 100644 index 0000000000000000000000000000000000000000..72d4a9d1d80d00a318fc8765fb6eeeea2028a72f --- /dev/null +++ b/OpenOOD/scripts/osr/openmax/imagenet200_test_ood_openmax.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/osr/openmax/imagenet200_test_ood_openmax.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor openmax \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor openmax \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/osr/openmax/imagenet_test_ood_openmax.sh b/OpenOOD/scripts/osr/openmax/imagenet_test_ood_openmax.sh new file mode 100644 index 0000000000000000000000000000000000000000..6e56fb3267e110f7ab4b3b78c321f08b5389643c --- /dev/null +++ b/OpenOOD/scripts/osr/openmax/imagenet_test_ood_openmax.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/osr/openmax/imagenet_test_ood_openmax.sh + +GPU=1 +CPU=1 +node=63 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/openmax.yml \ + --num_workers 10 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +#
new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor openmax \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor openmax \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/osr/openmax/mnist_test_ood_openmax.sh b/OpenOOD/scripts/osr/openmax/mnist_test_ood_openmax.sh new file mode 100644 index 0000000000000000000000000000000000000000..de58fa344031b42a4ead4c4091a66dfb80ccb6a0 --- /dev/null +++ b/OpenOOD/scripts/osr/openmax/mnist_test_ood_openmax.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# sh scripts/osr/openmax/mnist_test_ood_openmax.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/openmax.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/osr/openmax/mnist_test_osr_openmax.sh b/OpenOOD/scripts/osr/openmax/mnist_test_osr_openmax.sh new file mode 100644 index 0000000000000000000000000000000000000000..80a67a2ebec3464af98681d2a6e2a25d138276bf --- /dev/null +++ b/OpenOOD/scripts/osr/openmax/mnist_test_osr_openmax.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/osr/openmax/mnist_test_osr_openmax.sh + +# GPU=1 +# CPU=1 +# node=30 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/openmax.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/osr/openmax/sweep_osr.py b/OpenOOD/scripts/osr/openmax/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..3c5f9d598bfc444f7602c2358237c63133160c10 --- /dev/null +++ b/OpenOOD/scripts/osr/openmax/sweep_osr.py @@ -0,0 +1,39 @@ +# python scripts/osr/openmax/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + 
configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/openmax.yml \ + --network.pretrained True \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/sweep/sweep_hyperparam.py b/OpenOOD/scripts/sweep/sweep_hyperparam.py new file mode 100644 index 0000000000000000000000000000000000000000..75326d37aa79009908494f253f869b7e61b31e82 --- /dev/null +++ b/OpenOOD/scripts/sweep/sweep_hyperparam.py @@ -0,0 +1,112 @@ +import argparse +import os + +# dictionary with keywords from benchmarks +network_dict = { + 'mnist': 'lenet', + 'mnist6': 'lenet', + 'cifar10': 'resnet18_32x32', + 'cifar6': 'resnet18_32x32', + 'cifar100': 'resnet18_32x32', + 'cifar50': 'resnet18_32x32', + 'imagenet': 'resnet50', + 'tin20': 'resnet18_64x64' +} + +checkpoint_dict = { + 'mnist': './results/checkpoints/mnist_lenet_acc98.50.ckpt', + 'cifar10': './results/checkpoints/cifar10_res18_acc95.24.ckpt', + 'cifar100': './results/checkpoints/cifar100_res18_acc77.10.ckpt', + 'imagenet': './results/checkpoints/imagenet_res50_acc76.17.pth', + 'mnist6': './results/checkpoints/osr/mnist6', + 'cifar6': './results/checkpoints/osr/cifar6', + 'cifar50': './results/checkpoints/osr/cifar50', + 'tin20': './results/checkpoints/osr/tin20', +} + +method_dict = { + 'msp': + None, + 'odin': [ + '--postprocessor.postprocessor_args.temperature 1', + '--postprocessor.postprocessor_args.temperature 100', + '--postprocessor.postprocessor_args.temperature 1000' + ], + 'mds': + None, + 'gram': + None, +} + + +def make_args_list(benchmarks, methods, metrics): + args_list = [] + for benchmark in benchmarks: + for method in methods: + for metric in metrics: + args_list.append([benchmark, method, metric]) + return args_list + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Run a sweep') + parser.add_argument('--benchmarks', + nargs='+', + default=['mnist', 'cifar10', 'cifar100', 'imagenet']) + parser.add_argument('--methods', nargs='+', default=['msp']) + parser.add_argument('--metrics', nargs='+', default=['acc']) + parser.add_argument('--output-dir', type=str, default='./results/') + parser.add_argument('--launcher', + default='local', + choices=['local', 'slurm']) + args = parser.parse_args() + + # different command with different job schedulers + if args.launcher == 'slurm': + command_prefix = ("PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 -w SG-IDC1-10-51-2-79 ") + else: + command_prefix = "PYTHONPATH='.':$PYTHONPATH " + + args_list = make_args_list(args.benchmarks, args.methods, args.metrics) + print(f'{len(args_list)} experiments have been setup...', flush=True) + for exp_id, [benchmark, method, metric] in enumerate(args_list): + print(f'Experiment #{exp_id} Starts...', flush=True) + print(f'Config: {benchmark}, {method}, {metric}', flush=True) + if metric in ['ood', 'fsood']: + command = (f'python main.py --config \ + configs/datasets/{benchmark}/{benchmark}.yml \ + configs/datasets/{benchmark}/{benchmark}_{metric}.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/{network_dict[benchmark]}.yml \ + configs/pipelines/test/test_{metric}.yml \ + configs/postprocessors/{method}.yml \ + --network.checkpoint {checkpoint_dict[benchmark]} \ + --output_dir {args.output_dir}') + elif metric == 'osr': + for sid in 
range(1, 6): + print(f'5 OSR Exp, {sid} out of 5', flush=True) + command = (f'python main.py --config \ + configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}.yml \ + configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/{network_dict[benchmark]}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/postprocessors/{method}.yml \ + --network.checkpoint {checkpoint_dict[benchmark]}_seed{sid}.ckpt \ + --output_dir {args.output_dir}') + os.system(command_prefix + command) + elif metric in ['acc', 'ece']: + command = (f'python main.py --config \ + configs/datasets/{benchmark}/{benchmark}.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/{network_dict[benchmark]}.yml \ + configs/pipelines/test/test_{metric}.yml \ + configs/postprocessors/{method}.yml \ + --network.checkpoint {checkpoint_dict[benchmark]} \ + --output_dir {args.output_dir}') + os.system(command_prefix + command) + else: + raise ValueError('Unexpected Metric...') diff --git a/OpenOOD/scripts/sweep/sweep_posthoc.py b/OpenOOD/scripts/sweep/sweep_posthoc.py new file mode 100644 index 0000000000000000000000000000000000000000..c7fb78edeb1315aca9cf313c339a1b386188c595 --- /dev/null +++ b/OpenOOD/scripts/sweep/sweep_posthoc.py @@ -0,0 +1,127 @@ +import argparse +import csv +import os + +import numpy as np +from write_metrics import make_args_list, write_metric, write_total + +# dictionary with keywords from benchmarks +network_dict = { + 'mnist': 'lenet', + 'mnist6': 'lenet', + 'cifar10': 'resnet18_32x32', + 'cifar6': 'resnet18_32x32', + 'cifar100': 'resnet18_32x32', + 'cifar50': 'resnet18_32x32', + 'imagenet': 'resnet50', + 'tin20': 'resnet18_64x64' +} + +checkpoint_dict = { + 'mnist': './results/checkpoints/mnist_lenet_acc98.50.ckpt', + 'cifar10': './results/checkpoints/cifar10_res18_acc95.24.ckpt', + 'cifar100': './results/checkpoints/cifar100_res18_acc77.10.ckpt', + 'imagenet': './results/checkpoints/imagenet_res50_acc76.17.pth', + 'mnist6': './results/checkpoints/osr/mnist6', + 'cifar6': './results/checkpoints/osr/cifar6', + 'cifar50': './results/checkpoints/osr/cifar50', + 'tin20': './results/checkpoints/osr/tin20', +} + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Run a sweep') + parser.add_argument('--benchmarks', + nargs='+', + default=['mnist', 'cifar10', 'cifar100', 'imagenet']) + parser.add_argument('--methods', nargs='+', default=['msp']) + parser.add_argument('--metrics', nargs='+', default=['acc']) + parser.add_argument('--metric2save', nargs='+', default=['auroc']) + parser.add_argument('--update_form_only', action='store_true') + parser.add_argument('--output-dir', type=str, default='./results/') + parser.add_argument('--launcher', + default='local', + choices=['local', 'slurm']) + parser.add_argument('--merge-option', default='default') + args = parser.parse_args() + + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + # different command with different job schedulers + if args.launcher == 'slurm': + command_prefix = ("PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 -w SG-IDC1-10-51-2-79 ") + else: + command_prefix = "PYTHONPATH='.':$PYTHONPATH " + + # TODO: dynamic benchmark dict + benchmark_dict = { + 'ood': ['cifar10', 'cifar100'], + 'osr': ['cifar6', 'cifar50', 'mnist6', 'tin20'], + 'acc': args.benchmarks + } + + args_list = make_args_list(args.benchmarks, args.methods, 
args.metrics, + benchmark_dict) + print(f'{len(args_list)} experiments have been setup...', flush=True) + + if not args.update_form_only: + for exp_id, [benchmark, method, metric] in enumerate(args_list): + print(f'Experiment #{exp_id+1} Starts...', flush=True) + print(f'Config: {benchmark}, {method}, {metric}', flush=True) + if metric in ['ood', 'fsood']: + command = (f'python main.py --config \ + configs/datasets/{benchmark}/{benchmark}.yml \ + configs/datasets/{benchmark}/{benchmark}_{metric}.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/{network_dict[benchmark]}.yml \ + configs/pipelines/test/test_{metric}.yml \ + configs/postprocessors/{method}.yml \ + --network.checkpoint {checkpoint_dict[benchmark]} \ + --merge_option {args.merge_option} \ + --output_dir {args.output_dir}') + os.system(command_prefix + command) + elif metric == 'osr': + for sid in range(1, 6): + print(f'5 OSR Exp, {sid} out of 5', flush=True) + command = (f'python main.py --config \ + configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}.yml \ + configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/{network_dict[benchmark]}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/postprocessors/{method}.yml \ + --network.checkpoint {checkpoint_dict[benchmark]}_seed{sid}.ckpt \ + --output_dir {args.output_dir} \ + --merge_option {args.merge_option}') + os.system(command_prefix + command) + elif metric in ['acc', 'ece']: + command = (f'python main.py --config \ + configs/datasets/{benchmark}/{benchmark}.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/{network_dict[benchmark]}.yml \ + configs/pipelines/test/test_{metric}.yml \ + configs/postprocessors/{method}.yml \ + --network.checkpoint {checkpoint_dict[benchmark]} \ + --output_dir {args.output_dir} \ + --merge_option {args.merge_option}') + os.system(command_prefix + command) + else: + raise ValueError('Unexpected Metric...') + + folder_list = os.listdir(args.output_dir) + # TODO: do not hard code -8 + save_line_dict = {'ood': -8, 'osr': -1, 'acc': -1} + # TODO: extend according to config + args.benchmarks.extend([ + 'tin', 'nearood', 'mnist', 'svhn', 'texture', 'place365', 'places365', + 'farood' + ]) + + # TODO: try to find farood and near ood in another way, user can set what to save by changing ood's list + main_content_extract_dict = {'ood': ['nearood', 'farood'], 'osr': [-1]} + write_metric(args, folder_list, save_line_dict, benchmark_dict) + write_total(args, folder_list, save_line_dict, benchmark_dict, + main_content_extract_dict) diff --git a/OpenOOD/scripts/sweep/sweep_posthoc.sh b/OpenOOD/scripts/sweep/sweep_posthoc.sh new file mode 100644 index 0000000000000000000000000000000000000000..8bfff6fa1411ace70c08c811b3ad96ff56ff891a --- /dev/null +++ b/OpenOOD/scripts/sweep/sweep_posthoc.sh @@ -0,0 +1,9 @@ +# sh ./scripts/sweep/sweep_posthoc.sh +python ./scripts/sweep/sweep_posthoc.py \ +--benchmarks 'cifar10' \ +--methods 'msp' \ +--metrics 'ood' \ +--metric2save 'fpr95' 'auroc' 'aupr_in' \ +--output-dir './results/ood' \ +--launcher 'local' \ +--update_form_only diff --git a/OpenOOD/scripts/sweep/sweep_posthoc_ood.sh b/OpenOOD/scripts/sweep/sweep_posthoc_ood.sh new file mode 100644 index 0000000000000000000000000000000000000000..9179f47d0aef00d8d5655618f7e0517130457bd9 --- /dev/null +++ b/OpenOOD/scripts/sweep/sweep_posthoc_ood.sh @@ -0,0 +1,9 @@ +# sh ./scripts/sweep/sweep_posthoc_ood.sh +python
./scripts/sweep/sweep_posthoc.py \ +--benchmarks 'cifar10' 'cifar100' \ +--methods 'msp' 'odin' 'mds' 'gram' 'ebo' 'gradnorm' 'react' 'dice' 'vim' 'mls' 'klm' 'knn' \ +--metrics 'ood' \ +--metric2save 'fpr95' 'auroc' 'aupr_in' \ +--output-dir './results/ood' \ +--launcher 'local' \ +--update_form_only diff --git a/OpenOOD/scripts/sweep/sweep_posthoc_osr.sh b/OpenOOD/scripts/sweep/sweep_posthoc_osr.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f92e73c7b0336d243a8aff507ba9c68708de399 --- /dev/null +++ b/OpenOOD/scripts/sweep/sweep_posthoc_osr.sh @@ -0,0 +1,9 @@ +# sh ./scripts/sweep/sweep_posthoc_osr.sh +python ./scripts/sweep/sweep_posthoc.py \ +--benchmarks 'cifar6' 'cifar50' 'mnist6' 'tin20' \ +--methods 'msp' \ +--metrics 'osr' \ +--metric2save 'fpr95' 'auroc' 'aupr_in' \ +--output-dir './results/osr' \ +--launcher 'local' \ +--update_form_only diff --git a/OpenOOD/scripts/sweep/sweep_posthoc_total.sh b/OpenOOD/scripts/sweep/sweep_posthoc_total.sh new file mode 100644 index 0000000000000000000000000000000000000000..ced4ee40552be758322b9dbe8399ef768d580be5 --- /dev/null +++ b/OpenOOD/scripts/sweep/sweep_posthoc_total.sh @@ -0,0 +1,10 @@ +# sh ./scripts/sweep/sweep_posthoc_total.sh +python ./scripts/sweep/sweep_posthoc.py \ +--benchmarks 'cifar6' 'cifar50' 'mnist6' 'tin20' 'cifar10' 'cifar100' \ +--methods 'msp' 'odin' 'mds' 'gram' 'ebo' 'gradnorm' 'react' 'dice' 'vim' 'mls' 'klm' 'knn' \ +--metrics 'osr' 'ood' \ +--metric2save 'fpr95' 'auroc' 'aupr_in' \ +--output-dir './results/total' \ +--launcher 'local' \ +--merge-option 'pass' \ +--update_form_only diff --git a/OpenOOD/scripts/sweep/sweep_train.py b/OpenOOD/scripts/sweep/sweep_train.py new file mode 100644 index 0000000000000000000000000000000000000000..038e0013fb728d53eaa1a0dc831b1b2c53acd8cb --- /dev/null +++ b/OpenOOD/scripts/sweep/sweep_train.py @@ -0,0 +1,55 @@ +import argparse +import os + +# dictionary with keywords from benchmarks +network_dict = { + 'mnist': 'lenet', + 'mnist6': 'lenet', + 'cifar10': 'resnet18_32x32', + 'cifar6': 'resnet18_32x32', + 'cifar100': 'resnet18_32x32', + 'cifar50': 'resnet18_32x32', + 'imagenet': 'resnet50', + 'tin20': 'resnet18_64x64' +} + + +def make_args_list(benchmarks, methods, metrics): + args_list = [] + for benchmark in benchmarks: + for method in methods: + for metric in metrics: + args_list.append([benchmark, method, metric]) + return args_list + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Run a sweep') + parser.add_argument('--benchmarks', + nargs='+', + default=['mnist', 'cifar10', 'cifar100', 'imagenet']) + parser.add_argument('--launcher', + default='local', + choices=['local', 'slurm']) + args = parser.parse_args() + + # different command with different job schedulers + if args.launcher == 'slurm': + command_prefix = ("PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 -w SG-IDC1-10-51-2-67 ") + else: + command_prefix = "PYTHONPATH='.':$PYTHONPATH " + + print(f'{len(args.benchmarks)} experiments have been setup...', flush=True) + for exp_id, benchmark in enumerate(args.benchmarks): + print(f'Experiment #{exp_id} Starts...', flush=True) + for sid in range(1, 6): + print(f'5 OSR Exp, {sid} out of 5', flush=True) + command = (f'python main.py --config \ + configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/networks/{network_dict[benchmark]}.yml \ + 
configs/pipelines/train/baseline.yml') + os.system(command_prefix + command + ' &') diff --git a/OpenOOD/scripts/sweep/sweep_train.sh b/OpenOOD/scripts/sweep/sweep_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..e4ebc86ce3f52a63b7c63b2833564a03f63d0f67 --- /dev/null +++ b/OpenOOD/scripts/sweep/sweep_train.sh @@ -0,0 +1,4 @@ +# sh ./scripts/sweep/sweep_train.sh +python ./scripts/sweep/sweep_train.py \ +--benchmarks 'tin20' \ +--launcher 'slurm' diff --git a/OpenOOD/scripts/sweep/write_metrics.py b/OpenOOD/scripts/sweep/write_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9682b8f4221beb8b2c77e0c36534605dad2daff7 --- /dev/null +++ b/OpenOOD/scripts/sweep/write_metrics.py @@ -0,0 +1,225 @@ +import csv +import os + +import numpy as np + + +def make_args_list(benchmarks, methods, metrics, benchmark_dict): + args_list = [] + for metric in metrics: + for benchmark in set(benchmarks) & set(benchmark_dict[metric]): + for method in methods: + args_list.append([benchmark, method, metric]) + return args_list + + +def write_metric(args, folder_list, save_line_dict, benchmark_dict): + + metric_list = [ + 'fpr95', 'auroc', 'aupr_in', 'aupr_out', 'ccr_4', 'ccr_3', 'ccr_2', + 'ccr_1', 'acc' + ] + save_list = [] + for metric in args.metric2save: + save_list.append(metric_list.index(metric) + 1) + + for metric in args.metrics: + if metric == 'ood': + for benchmark in set(args.benchmarks) & set( + benchmark_dict[metric]): + args_list = make_args_list([benchmark], args.methods, ['ood'], + benchmark_dict) + sub_form_content = [] + for key_param in args_list: + for folder in folder_list: + key_folder = folder.split('_') + if all(key in key_folder for key in key_param): + target_folder = folder + break + else: + print("No respective folder path, something's wrong.") + raise FileNotFoundError + # quit() + + with open( + os.path.join(args.output_dir, target_folder, + 'ood.csv'), 'r') as f: + lines = f.readlines()[save_line_dict[key_param[-1]]:] + sub_line_content = {} + sub_line_content['method/{}'.format( + args.metric2save)] = key_param[1] + for line in lines: + split = line.split(',') + content = '' + for metric in save_list: + content = content + '{:.2f}'.format( + float(split[metric])) + ' / ' + else: + content = content[:-3] + # use method name as key + sub_line_content[split[0]] = content + sub_form_content.append(sub_line_content) + csv_path = os.path.join(args.output_dir, + '{}_ood.csv'.format(key_param[0])) + with open(csv_path, 'w', newline='') as csvfile: + fieldnames = order_fieldnames( + list(sub_form_content[0].keys()), args) + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + for sub_line_content in sub_form_content: + writer.writerow(sub_line_content) + + elif metric == 'osr': + sub_form_content = [] + for method in args.methods: + args_list = make_args_list(args.benchmarks, [method], ['osr'], + benchmark_dict) + sub_line_content = {} + + for key_param in args_list: + sub_line_content['method/{}'.format( + args.metric2save)] = key_param[1] + target_folder = [] + seeds = ['seed1', 'seed2', 'seed3', 'seed4', 'seed5'] + for seed in seeds: + key_param.append(seed) + for folder in folder_list: + key_folder = folder.split('_') + if all(key in key_folder for key in key_param): + target_folder.append(folder) + break + else: + print( + "No respective folder path, something's wrong." 
+ ) + raise FileNotFoundError + # quit() + key_param.pop(-1) + + temp = np.ndarray(shape=(len(seeds), len(save_list))) + for i, folder in enumerate(target_folder): + with open( + os.path.join(args.output_dir, folder, + 'ood.csv'), 'r') as f: + lines = f.readlines( + )[save_line_dict[key_param[-1]]:] + for line in lines: + split = line.split(',') + for j, metric_index in enumerate(save_list): + temp[i][j] = split[metric_index] + content = '' + for item in np.mean(temp, axis=0): + content = content + '{:.2f}'.format(item) + ' / ' + else: + content = content[:-3] + + sub_line_content[key_param[0]] = content + sub_form_content.append(sub_line_content) + + csv_path = os.path.join(args.output_dir, 'total_osr.csv') + with open(csv_path, 'w', newline='') as csvfile: + fieldnames = order_fieldnames(list(sub_form_content[0].keys()), + args) + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + for sub_line_content in sub_form_content: + writer.writerow(sub_line_content) + + +def write_total(args, folder_list, save_line_dict, benchmark_dict, + main_content_extract_dict): + main_form_content = [] + for method in args.methods: + main_line_content = {} + for metric in args.metrics: + args_list = make_args_list(args.benchmarks, [method], [metric], + benchmark_dict) + for key_param in args_list: + main_line_content['method --> auroc'] = key_param[1] + + if metric == 'ood': + for folder in folder_list: + key_folder = folder.split('_') + if all(key in key_folder for key in key_param): + target_folder = folder + break + else: + print("No respective folder path, something's wrong.") + # quit() + + with open( + os.path.join(args.output_dir, target_folder, + 'ood.csv'), 'r') as f: + lines = f.readlines()[save_line_dict[key_param[-1]]:] + + content = '' + for line in lines: + if line.split(',')[0] in main_content_extract_dict[ + key_param[-1]]: + + # take auroc only + content = content + '{:.2f}'.format( + float(line.split(',')[2])) + ' / ' + else: + content = content[:-3] + # use benchmark name as key + main_line_content[key_param[0]] = content + + if metric == 'osr': + target_folder = [] + seeds = ['seed1', 'seed2', 'seed3', 'seed4', 'seed5'] + for seed in seeds: + key_param.append(seed) + for folder in folder_list: + key_folder = folder.split('_') + if all(key in key_folder for key in key_param): + target_folder.append(folder) + break + else: + print( + "No respective folder path, something's wrong." 
+ ) + # quit() + key_param.pop(-1) + + temp = np.ndarray(shape=(len(seeds), 1)) + for i, folder in enumerate(target_folder): + with open( + os.path.join(args.output_dir, folder, + 'ood.csv'), 'r') as f: + lines = f.readlines( + )[save_line_dict[key_param[-1]]:] + for line in lines: + split = line.split(',') + temp[i] = split[2] + content = '{:.2f}'.format(np.mean(temp, axis=0).item()) + main_line_content[key_param[0]] = content + + main_form_content.append(main_line_content) + + csv_path = os.path.join(args.output_dir, 'total_result.csv') + with open(csv_path, 'w', newline='') as csvfile: + fieldnames = order_fieldnames(list(main_form_content[0].keys()), args) + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + writer.writeheader() + for main_line_content in main_form_content: + writer.writerow(main_line_content) + + +# debug helper: list result folders that are missing an ood.csv (guarded so that importing this module does not fail when ./results/total is absent) +verify_dir = './results/total' +if os.path.isdir(verify_dir): + for folder in os.listdir(verify_dir): + if os.path.isdir(os.path.join(verify_dir, folder)): + if 'ood.csv' not in os.listdir(os.path.join(verify_dir, folder)): + # if 'seed1' in folder.split('_'): + print(folder) + + +def order_fieldnames(keys, args): + + ordered_keys = [] + ordered_keys.append(keys[0]) + for item in args.benchmarks: + if item in keys: + ordered_keys.append(item) + + return ordered_keys diff --git a/OpenOOD/scripts/uncertainty/augmix/cifar100_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/augmix/cifar100_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..c40d6cc89fbc6ad89e888875b526a800195227d0 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/augmix/cifar100_test_ood_msp.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/augmix/cifar100_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_augmix_e100_lr0.1_no-jsd \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/augmix/cifar100_train_augmix.sh b/OpenOOD/scripts/uncertainty/augmix/cifar100_train_augmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..ca3c4f7994ba4382c3835723940feb2ea4483c78 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/augmix/cifar100_train_augmix.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/uncertainty/augmix/cifar100_train_augmix.sh + +# somehow the loss will diverge to NaN if using JSD +# so just use no-jsd here +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_augmix.yml \ + configs/preprocessors/augmix_preprocessor.yml \ + --preprocessor.severity 3 \ + --trainer.trainer_args.jsd False \ + --dataset.train.dataset_class ImglistDataset \ + --optimizer.num_epochs 100 \ + --dataset.train.batch_size 128 \ + --seed 0 \ + --mark no-jsd diff --git a/OpenOOD/scripts/uncertainty/augmix/cifar10_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/augmix/cifar10_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..03ae255a986eaf710722e81f1e3d8a9e0a4f41ff --- /dev/null +++ b/OpenOOD/scripts/uncertainty/augmix/cifar10_test_ood_msp.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/augmix/cifar10_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +#
the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_augmix_e100_lr0.1_no-jsd \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/augmix/cifar10_train_augmix.sh b/OpenOOD/scripts/uncertainty/augmix/cifar10_train_augmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..df705c9507d967ffde0d3c5c73f362c861f4ba99 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/augmix/cifar10_train_augmix.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/uncertainty/augmix/cifar10_train_augmix.sh + +# somehow the loss will diverge to NaN if using JSD +# so just use no-jsd here +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_augmix.yml \ + configs/preprocessors/augmix_preprocessor.yml \ + --preprocessor.severity 3 \ + --trainer.trainer_args.jsd False \ + --dataset.train.dataset_class ImglistDataset \ + --optimizer.num_epochs 100 \ + --dataset.train.batch_size 128 \ + --seed 0 \ + --mark no-jsd diff --git a/OpenOOD/scripts/uncertainty/augmix/imagenet200_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/augmix/imagenet200_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..7c43a403b06a127e4de615c2fd3c0f5205c0ffed --- /dev/null +++ b/OpenOOD/scripts/uncertainty/augmix/imagenet200_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/augmix/imagenet200_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_augmix_e90_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_augmix_e90_lr0.1_default \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/augmix/imagenet200_train_augmix.sh b/OpenOOD/scripts/uncertainty/augmix/imagenet200_train_augmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..ab4dfb9bbd7be22c826b96b17fb3b933c2fef81b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/augmix/imagenet200_train_augmix.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/augmix/imagenet200_train_augmix.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/train_augmix.yml \ + configs/preprocessors/augmix_preprocessor.yml \ + --dataset.train.dataset_class ImglistAugMixDataset \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/uncertainty/augmix/imagenet_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/augmix/imagenet_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..7cfc667b7c73d39a811152ec571f980f5fc30a08 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/augmix/imagenet_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/augmix/imagenet_test_ood_msp.sh + +############################################ +# we 
recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_tvsv1_augmix_default/ckpt.pth \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_tvsv1_augmix_default/ckpt.pth \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/cutmix/cifar100_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/cutmix/cifar100_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..55e0b5bb1ac25be27e653c7d18bded3ffbb88fa5 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/cifar100_test_ood_msp.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/cutmix/cifar100_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/cifar100_resnet18_32x32_cutmix_e100_lr0.1_cutmix/best.ckpt' \ +--mark cutmix diff --git a/OpenOOD/scripts/uncertainty/cutmix/cifar100_train_cutmix.sh b/OpenOOD/scripts/uncertainty/cutmix/cifar100_train_cutmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..e7be866c30e0b01c4c43fac5f8fa1fa1f76be86b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/cifar100_train_cutmix.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/cutmix/cifar100_train_cutmix.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/train/train_cutmix.yml \ +configs/preprocessors/base_preprocessor.yml \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--trainer.trainer_args.cutmix_prob 0.5 \ +--mark cutmix diff --git a/OpenOOD/scripts/uncertainty/cutmix/cifar10_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/cutmix/cifar10_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..c5f7f8407498416835479d767e8174d606a7c735 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/cifar10_test_ood_msp.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/cutmix/cifar10_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ 
+configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/cifar10_resnet18_32x32_cutmix_e100_lr0.1_cutmix/best.ckpt' \ +--mark cutmix diff --git a/OpenOOD/scripts/uncertainty/cutmix/cifar10_train_cutmix.sh b/OpenOOD/scripts/uncertainty/cutmix/cifar10_train_cutmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..2db2147cbf5f67048e9fa95de28c3a10a403d5ef --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/cifar10_train_cutmix.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/cutmix/cifar10_train_cutmix.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/train/train_cutmix.yml \ +configs/preprocessors/base_preprocessor.yml \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--trainer.trainer_args.cutmix_prob 0.5 \ +--mark cutmix diff --git a/OpenOOD/scripts/uncertainty/cutmix/mnist_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/cutmix/mnist_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..7c6d708de1a88a503036cf1e75356f566843f0c1 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/mnist_test_ood_msp.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/cutmix/mnist_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/mnist_lenet_cutmix_e100_lr0.1_cutmix/best.ckpt' \ +--mark cutmix diff --git a/OpenOOD/scripts/uncertainty/cutmix/mnist_train_cutmix.sh b/OpenOOD/scripts/uncertainty/cutmix/mnist_train_cutmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..5ef9bfdfaa4e152831cd22e8ffc27fd05526807a --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/mnist_train_cutmix.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/cutmix/mnist_train_cutmix.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/networks/lenet.yml \ +configs/pipelines/train/train_cutmix.yml \ +configs/preprocessors/base_preprocessor.yml \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--trainer.trainer_args.cutmix_prob 0.5 \ +--mark cutmix diff --git a/OpenOOD/scripts/uncertainty/cutmix/osr_mnist6_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/cutmix/osr_mnist6_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..8c33b6961615a774150b4283101381683b0addf9 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/osr_mnist6_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/cutmix/osr_mnist6_test_ood_msp.sh + 
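+# (editor's note, hedged) the GPU/CPU/node/jobname variables below only feed the commented-out srun launcher and are unused when running locally; note also that the trailing backslash on the PYTHONPATH line makes bash join it with the comment line that follows, so the assignment is never applied to the python command below. if main.py needs the repo root on its import path, export it explicitly first, e.g.: +#   export PYTHONPATH=.:$PYTHONPATH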
+GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/osr_mnist6_seed1_lenet_cutmix_e100_lr0.1_cutmix/best.ckpt' \ +--mark cutmix diff --git a/OpenOOD/scripts/uncertainty/cutmix/osr_mnist6_train_cutmix.sh b/OpenOOD/scripts/uncertainty/cutmix/osr_mnist6_train_cutmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..71adacb4d23e505291a8ec2bd28a96ee446aa670 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/osr_mnist6_train_cutmix.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/cutmix/osr_mnist6_train_cutmix.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/networks/lenet.yml \ +configs/pipelines/train/train_cutmix.yml \ +configs/preprocessors/base_preprocessor.yml \ +--num_workers 8 \ +--optimizer.num_epochs 100 \ +--trainer.trainer_args.cutmix_prob 0.5 \ +--mark cutmix diff --git a/OpenOOD/scripts/uncertainty/cutmix/sweep.py b/OpenOOD/scripts/uncertainty/cutmix/sweep.py new file mode 100644 index 0000000000000000000000000000000000000000..a46077a03214dd570973b926216c44a4c7b4c441 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutmix/sweep.py @@ -0,0 +1,26 @@ +# python scripts/uncertainty/cutmix/sweep.py +import os + +config = [ + ['osr_cifar6/cifar6_seed1.yml', 'resnet18_32x32'], + ['osr_cifar50/cifar50_seed1.yml', 'resnet18_32x32'], + ['osr_tin20/tin20_seed1.yml', 'resnet18_64x64'], + ['osr_mnist4/mnist4_seed1.yml', 'lenet'], + ['mnist/mnist.yml', 'lenet'], +] + +for [dataset, network] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/train/train_cutmix.yml \ + configs/preprocessors/base_preprocessor.yml \ + --network.pretrained False \ + --trainer.trainer_args.cutmix_prob 0.5 \ + --optimizer.num_epochs 100 \ + --num_workers 8 &") + os.system(command) diff --git a/OpenOOD/scripts/uncertainty/cutout/cifar100_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/cutout/cifar100_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..724b370e57ded80236b3e733f3ec33d550cd0bfe --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutout/cifar100_test_ood_msp.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/cutout/cifar100_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root 
./results/cifar100_resnet18_32x32_base_e100_lr0.1_cutout-1-8 \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/cutout/cifar100_train_cutout.sh b/OpenOOD/scripts/uncertainty/cutout/cifar100_train_cutout.sh new file mode 100644 index 0000000000000000000000000000000000000000..3872335e5c2f16c6057b1950ba5745022a0ab1b2 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutout/cifar100_train_cutout.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# sh scripts/uncertainty/cutout/cifar100_train_cutout.sh + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/cutout_preprocessor.yml \ + --preprocessor.length 8 \ + --seed 0 \ + --mark cutout-1-8 diff --git a/OpenOOD/scripts/uncertainty/cutout/cifar10_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/cutout/cifar10_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..3e68ef8c82266752d50caac28ba04e1ecb2fac6f --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutout/cifar10_test_ood_msp.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/cutout/cifar10_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_cutout-1-16 \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/cutout/cifar10_train_cutout.sh b/OpenOOD/scripts/uncertainty/cutout/cifar10_train_cutout.sh new file mode 100644 index 0000000000000000000000000000000000000000..6738a35c7511353a1bf729ae5096c0b69daaf91b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/cutout/cifar10_train_cutout.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# sh scripts/uncertainty/cutout/cifar10_train_cutout.sh + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/cutout_preprocessor.yml \ + --seed 0 \ + --mark cutout-1-16 diff --git a/OpenOOD/scripts/uncertainty/deepaugment/imagenet200_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/deepaugment/imagenet200_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f20d9c616b10fb656c06cc5d583d91a2cd2fb9b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/deepaugment/imagenet200_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/deepaugment/imagenet200_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e30_lr0.1_deepaugment \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e30_lr0.1_deepaugment \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/deepaugment/imagenet200_train_deepaugment.sh b/OpenOOD/scripts/uncertainty/deepaugment/imagenet200_train_deepaugment.sh new file mode 100644 index 
0000000000000000000000000000000000000000..932d40d5a47d94d06fda23a71521295ffd58efa9 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/deepaugment/imagenet200_train_deepaugment.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/uncertainty/deepaugment/imagenet200_train_deepaugment.sh + +# the model sees three times as much data as the baseline, +# so it only trains for 90/3 = 30 epochs +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + --dataset.train.imglist_pth ./data/benchmark_imglist/imagenet200/train_imagenet200_deepaugment.txt \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 \ + --mark deepaugment diff --git a/OpenOOD/scripts/uncertainty/deepaugment/imagenet_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/deepaugment/imagenet_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..98a6c3a690890b3346020eb31e84f0f7296c5c21 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/deepaugment/imagenet_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/deepaugment/imagenet_test_ood_msp.sh + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_tvsv1_base_deepaugment/ckpt.pth \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_tvsv1_base_deepaugment/ckpt.pth \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/ensemble/2_mnist_ensemble_train.sh b/OpenOOD/scripts/uncertainty/ensemble/2_mnist_ensemble_train.sh new file mode 100644 index 0000000000000000000000000000000000000000..799a8dd631e0fd6570e567c010b3539f87856027 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/ensemble/2_mnist_ensemble_train.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/uncertainty/ensemble/2_mnist_ensemble_train.sh + +# for ensemble (mnist + lenet) + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +-w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/digits/mnist.yml \ +configs/networks/lenet.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/pipelines/train/baseline.yml \ +--optimizer.num_epochs 50 \ +--num_workers 8 \ +--output_dir ./results/lenet_ensemble_pretrained \ +--exp_name network5 diff --git a/OpenOOD/scripts/uncertainty/ensemble/cifar100_test_ood_ensemble.sh b/OpenOOD/scripts/uncertainty/ensemble/cifar100_test_ood_ensemble.sh new file mode 100644 index 0000000000000000000000000000000000000000..3009da104e966c6ee76faace347db02082361142 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/ensemble/cifar100_test_ood_ensemble.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# sh scripts/uncertainty/ensemble/cifar100_test_ood_ensemble.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1
--job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ensemble.yml \ +--network.pretrained False \ +--num_workers 8 \ +--mark 0 \ +--postprocessor.postprocessor_args.network_name resnet18_32x32 \ +--postprocessor.postprocessor_args.checkpoint_root 'results/cifar100_resnet18_test_ensemble' \ +--postprocessor.postprocessor_args.num_networks 5 \ +--dataset.test.batch_size 64 \ +--dataset.val.batch_size 64 \ +--ood_dataset.batch_size 64 diff --git a/OpenOOD/scripts/uncertainty/ensemble/cifar10_test_ood_ensemble.sh b/OpenOOD/scripts/uncertainty/ensemble/cifar10_test_ood_ensemble.sh new file mode 100644 index 0000000000000000000000000000000000000000..04b72198a3bfa1c410c24a765ae99f3f7c463c2e --- /dev/null +++ b/OpenOOD/scripts/uncertainty/ensemble/cifar10_test_ood_ensemble.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# sh scripts/uncertainty/ensemble/cifar10_test_ood_ensemble.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ensemble.yml \ +--network.pretrained False \ +--num_workers 8 \ +--mark 0 \ +--postprocessor.postprocessor_args.network_name resnet18_32x32 \ +--postprocessor.postprocessor_args.checkpoint_root 'results/cifar10_resnet18_test_ensemble' \ +--postprocessor.postprocessor_args.num_networks 5 \ +--dataset.test.batch_size 64 \ +--dataset.val.batch_size 64 \ +--ood_dataset.batch_size 64 diff --git a/OpenOOD/scripts/uncertainty/ensemble/mnist_ensemble_test.sh b/OpenOOD/scripts/uncertainty/ensemble/mnist_ensemble_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..03ac3235400fa516b7e75182558b3ab13e9bace0 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/ensemble/mnist_ensemble_test.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# sh scripts/uncertainty/ensemble/mnist_ensemble_test.sh + +#GPU=1 +#CPU=1 +#node=73 +#jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ensemble.yml \ +--network.pretrained False \ +--num_workers 8 \ +--mark 0 \ +--postprocessor.postprocessor_args.network_name lenet \ +--postprocessor.postprocessor_args.checkpoint_root 'results/mnist_lenet_test_ensemble' \ +--postprocessor.postprocessor_args.num_networks 5 \ +--dataset.test.batch_size 64 \ +--dataset.val.batch_size 64 \ +--ood_dataset.batch_size 64 diff --git a/OpenOOD/scripts/uncertainty/ensemble/osr_mnist_test_ood_ensemble.sh b/OpenOOD/scripts/uncertainty/ensemble/osr_mnist_test_ood_ensemble.sh new file mode 100644 index 
0000000000000000000000000000000000000000..82bcaa2429cf1b47899be9ca8287f2afa488799d --- /dev/null +++ b/OpenOOD/scripts/uncertainty/ensemble/osr_mnist_test_ood_ensemble.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# sh scripts/uncertainty/ensemble/osr_mnist_test_ood_ensemble.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ensemble.yml \ +--network.pretrained False \ +--num_workers 8 \ +--mark 0 \ +--postprocessor.postprocessor_args.network_name lenet \ +--postprocessor.postprocessor_args.checkpoint_root 'results/_osr_mnist6_test_ensemble' \ +--postprocessor.postprocessor_args.num_networks 5 \ +--dataset.test.batch_size 64 \ +--dataset.val.batch_size 64 \ +--ood_dataset.batch_size 64 diff --git a/OpenOOD/scripts/uncertainty/ensemble/osr_test_ood_ensemble.sh b/OpenOOD/scripts/uncertainty/ensemble/osr_test_ood_ensemble.sh new file mode 100644 index 0000000000000000000000000000000000000000..2869d940587a220f6c345d92fe10bcfa8918f7b0 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/ensemble/osr_test_ood_ensemble.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# sh scripts/uncertainty/ensemble/osr_test_ood_ensemble.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} \ +python main.py \ +--config configs/datasets/osr_tin20/tin20_seed1.yml \ +configs/datasets/osr_tin20/tin20_seed1_ood.yml \ +configs/networks/resnet18_64x64.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/ensemble.yml \ +--network.pretrained False \ +--num_workers 8 \ +--mark 0 \ +--postprocessor.postprocessor_args.network_name resnet18_64x64 \ +--postprocessor.postprocessor_args.checkpoint_root 'results/osr_tin20_seed1' \ +--postprocessor.postprocessor_args.num_networks 5 \ +--dataset.test.batch_size 64 \ +--dataset.val.batch_size 64 \ +--ood_dataset.batch_size 64 diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/cifar100_test_mc_dropout.sh b/OpenOOD/scripts/uncertainty/mc_dropout/cifar100_test_mc_dropout.sh new file mode 100644 index 0000000000000000000000000000000000000000..32e5d2af4c5608b1e1cfb8b552b506fd01d3911b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/cifar100_test_mc_dropout.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/uncertainty/mc_dropout/cifar100_test_mc_dropout.sh + + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/dropout_net.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dropout.yml \ +--num_workers 8 \ +--network.checkpoint 'results/cifar100_dropout_net_base_e100_lr0.1_default/best.ckpt' \ +--mark 0 diff 
--git a/OpenOOD/scripts/uncertainty/mc_dropout/cifar100_train_mc_dropout.sh b/OpenOOD/scripts/uncertainty/mc_dropout/cifar100_train_mc_dropout.sh new file mode 100644 index 0000000000000000000000000000000000000000..1e59e078858fec6f240fd007bbe0c6e58499b38b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/cifar100_train_mc_dropout.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/uncertainty/mc_dropout/cifar100_train_mc_dropout.sh + + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/networks/dropout_net.yml \ +configs/pipelines/train/baseline.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.backbone.name resnet18_32x32 \ +--network.backbone.pretrained False \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/cifar10_test_mc_dropout.sh b/OpenOOD/scripts/uncertainty/mc_dropout/cifar10_test_mc_dropout.sh new file mode 100644 index 0000000000000000000000000000000000000000..bbcac0e34d8f81802305be93866b023af3ee891b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/cifar10_test_mc_dropout.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/uncertainty/mc_dropout/cifar10_test_mc_dropout.sh + + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/dropout_net.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dropout.yml \ +--evaluator.name ood \ +--num_workers 8 \ +--network.checkpoint 'results/cifar10_dropout_net_base_e100_lr0.1_default/best.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/cifar10_train_mc_dropout.sh b/OpenOOD/scripts/uncertainty/mc_dropout/cifar10_train_mc_dropout.sh new file mode 100644 index 0000000000000000000000000000000000000000..e18eb831f37c0e83e11d27a9aee9ea039f783daa --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/cifar10_train_mc_dropout.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/uncertainty/mc_dropout/cifar10_train_mc_dropout.sh + + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/networks/dropout_net.yml \ +configs/pipelines/train/baseline.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.backbone.name resnet18_32x32 \ +--network.backbone.pretrained False \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/mnist_test_mc_dropout.sh b/OpenOOD/scripts/uncertainty/mc_dropout/mnist_test_mc_dropout.sh new file mode 100644 index 0000000000000000000000000000000000000000..6fa027870c3d5351bd8eabeb1994dabfc2c9872b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/mnist_test_mc_dropout.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh 
scripts/uncertainty/mc_dropout/mnist_test_mc_dropout.sh + +#GPU=1 +#CPU=1 +#node=73 +#jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/dropout_net.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dropout.yml \ +--network.backbone.name lenet \ +--num_workers 8 \ +--network.checkpoint 'results/mnist_dropout_net_base_e100_lr0.1_default/best.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/mnist_train_mc_dropout.sh b/OpenOOD/scripts/uncertainty/mc_dropout/mnist_train_mc_dropout.sh new file mode 100644 index 0000000000000000000000000000000000000000..6da576cea3eb4cf3dee596286ed5a631dd93c0f9 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/mnist_train_mc_dropout.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/uncertainty/mc_dropout/mnist_train_mc_dropout.sh + + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/networks/dropout_net.yml \ +configs/pipelines/train/baseline.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.backbone.name lenet \ +--network.backbone.pretrained False \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/osr_mnist6_test_mc_dropout.sh b/OpenOOD/scripts/uncertainty/mc_dropout/osr_mnist6_test_mc_dropout.sh new file mode 100644 index 0000000000000000000000000000000000000000..ad52baaa443f8cd721aedd9b844d92762f6e0738 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/osr_mnist6_test_mc_dropout.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/uncertainty/mc_dropout/osr_mnist6_test_mc_dropout.sh + + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/dropout_net.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/dropout.yml \ +--network.backbone.name lenet \ +--num_workers 8 \ +--network.checkpoint 'results/osr_mnist6_seed1_dropout_net_base_e100_lr0.1_default/best.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/osr_mnist6_train_mc_dropout.sh b/OpenOOD/scripts/uncertainty/mc_dropout/osr_mnist6_train_mc_dropout.sh new file mode 100644 index 0000000000000000000000000000000000000000..f89d9ea51cc7dab0f2ffd44fdf9de427bb6a42e0 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/osr_mnist6_train_mc_dropout.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/uncertainty/mc_dropout/osr_mnist6_train_mc_dropout.sh + + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# 
--kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/networks/dropout_net.yml \ +configs/pipelines/train/baseline.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.backbone.name lenet \ +--network.backbone.pretrained False \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/sweep.py b/OpenOOD/scripts/uncertainty/mc_dropout/sweep.py new file mode 100644 index 0000000000000000000000000000000000000000..198b118bcb3275357e51ca1bcadf629fb54a9679 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/sweep.py @@ -0,0 +1,26 @@ +# python scripts/uncertainty/mc_dropout/sweep.py +import os + +config = [ + ['osr_cifar6/cifar6_seed1.yml', 'resnet18_32x32'], + ['osr_cifar50/cifar50_seed1.yml', 'resnet18_32x32'], + ['osr_tin20/tin20_seed1.yml', 'resnet18_64x64'], + ['osr_mnist4/mnist4_seed1.yml', 'lenet'], + ['mnist/mnist.yml', 'lenet'], +] + +for [dataset, network] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ --cpus-per-task=1 --ntasks-per-node=1 \ --kill-on-bad-exit=1 --job-name=openood \ python main.py \ --config configs/datasets/{dataset} \ configs/networks/dropout_net.yml \ configs/pipelines/train/baseline.yml \ configs/preprocessors/base_preprocessor.yml \ --network.backbone.name {network} \ --network.backbone.pretrained False \ --optimizer.num_epochs 100 \ --num_workers 8 &") + os.system(command) diff --git a/OpenOOD/scripts/uncertainty/mc_dropout/sweep_test.py b/OpenOOD/scripts/uncertainty/mc_dropout/sweep_test.py new file mode 100644 index 0000000000000000000000000000000000000000..46217537269d46959e12b47817ae71ab82263385 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mc_dropout/sweep_test.py @@ -0,0 +1,46 @@ +# python scripts/uncertainty/mc_dropout/sweep_test.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', + 'osr_cifar6_seed1_dropout_net_base_e100_lr0.1_default' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', + 'osr_cifar50_seed1_dropout_net_base_e100_lr0.1_default' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'osr_tin20_seed1_dropout_net_base_e100_lr0.1_default' + ], + [ + 'osr_mnist4/mnist4_seed1.yml', 'osr_mnist4/mnist4_seed1_ood.yml', + 'lenet', 'osr_mnist4_seed1_dropout_net_base_e100_lr0.1_default' + ], +] + +for [dataset, ood_data, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ --cpus-per-task=1 --ntasks-per-node=1 \ --kill-on-bad-exit=1 --job-name=openood \ python main.py \ --config configs/datasets/{dataset} \ configs/datasets/{ood_data} \ configs/networks/dropout_net.yml \ configs/pipelines/test/test_osr.yml \ configs/preprocessors/base_preprocessor.yml \ configs/postprocessors/dropout.yml \ --num_workers 8 \ --network.checkpoint 'results/{pth}/best.ckpt' \ --mark 0 \ --merge_option merge &") + os.system(command)
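+# editor's sketch (not in the original script): os.system discards the
+# exit status, so failed srun submissions pass silently; subprocess keeps
+# the same shell semantics while surfacing shell-level failures:
+#   import subprocess
+#   assert subprocess.call(command, shell=True) == 0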
diff --git a/OpenOOD/scripts/uncertainty/mixup/cifar100_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/mixup/cifar100_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..14615439c5b4d7956e1c6ec3baa11e96afef7642 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/cifar100_test_ood_msp.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/mixup/cifar100_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/cifar100_resnet18_32x32_mixup_e100_lr0.1_alpha0.2/best.ckpt' \ +--mark mixup diff --git a/OpenOOD/scripts/uncertainty/mixup/cifar100_train_mixup.sh b/OpenOOD/scripts/uncertainty/mixup/cifar100_train_mixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..022bc555e35b21522375873f9ab5416ff3064e2a --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/cifar100_train_mixup.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/uncertainty/mixup/cifar100_train_mixup.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/train/train_mixup.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.pretrained False \ +--optimizer.num_epochs 100 \ +--num_workers 8 \ No newline at end of file diff --git a/OpenOOD/scripts/uncertainty/mixup/cifar10_test_ood_mixup.sh b/OpenOOD/scripts/uncertainty/mixup/cifar10_test_ood_mixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..9751bcefa9c7630e59d30fc1471f2f8ebda52453 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/cifar10_test_ood_mixup.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/mixup/cifar10_test_ood_mixup.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/datasets/cifar10/cifar10_ood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/cifar10_resnet18_32x32_mixup_e100_lr0.1_alpha0.2_default/best.ckpt' \ +--mark mixup diff --git a/OpenOOD/scripts/uncertainty/mixup/cifar10_train_mixup.sh b/OpenOOD/scripts/uncertainty/mixup/cifar10_train_mixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..08bb4f348f9377c1e3c04f2e14e3b1ab5a9d2428 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/cifar10_train_mixup.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/uncertainty/mixup/cifar10_train_mixup.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \
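+# note: the alpha0.2 suffix in the checkpoint names above suggests that
+# train_mixup.yml defaults to mixup alpha = 0.2; it can presumably be
+# overridden via --trainer.trainer_args.alpha, as in the regmixup scripts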
+PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/cifar10/cifar10.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/train/train_mixup.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.pretrained False \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/uncertainty/mixup/mnist_test_ood_mixup.sh b/OpenOOD/scripts/uncertainty/mixup/mnist_test_ood_mixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..59155f31803cdbc30b874ed3b3b3534e4fdf325d --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/mnist_test_ood_mixup.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/mixup/mnist_test_ood_mixup.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/mnist_lenet_mixup_e100_lr0.1_alpha0.2_default/best.ckpt' \ +--mark mixup diff --git a/OpenOOD/scripts/uncertainty/mixup/mnist_train_mixup.sh b/OpenOOD/scripts/uncertainty/mixup/mnist_train_mixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..fbdbbe34bc935f1437bcb00d57af74d719051c63 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/mnist_train_mixup.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# sh scripts/uncertainty/mixup/mnist_train_mixup.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/networks/lenet.yml \ +configs/pipelines/train/train_mixup.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.pretrained False \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/uncertainty/mixup/osr_mnist6_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/mixup/osr_mnist6_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..64ce9cae332d7312fcd7987cdf42f49fb598506a --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/osr_mnist6_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/mixup/osr_mnist6_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/osr_mnist6_seed1_lenet_mixup_e100_lr0.1_alpha0.2_default/best.ckpt' \ +--mark mixup diff --git a/OpenOOD/scripts/uncertainty/mixup/osr_mnist6_train_mixup.sh b/OpenOOD/scripts/uncertainty/mixup/osr_mnist6_train_mixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..672dde94897f11736d0ed03501a0638999f2c01f --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/osr_mnist6_train_mixup.sh @@ -0,0 +1,21 @@ +#!/bin/bash
+# sh scripts/uncertainty/mixup/osr_mnist6_train_mixup.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/networks/lenet.yml \ +configs/pipelines/train/train_mixup.yml \ +configs/preprocessors/base_preprocessor.yml \ +--network.pretrained False \ +--optimizer.num_epochs 100 \ +--num_workers 8 diff --git a/OpenOOD/scripts/uncertainty/mixup/sweep.py b/OpenOOD/scripts/uncertainty/mixup/sweep.py new file mode 100644 index 0000000000000000000000000000000000000000..38d23472e23c6193856d551d768034db29367ccb --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/sweep.py @@ -0,0 +1,26 @@ +# python scripts/uncertainty/mixup/sweep.py +import os + +config = [ + ['osr_cifar6/cifar6_seed1.yml', 'resnet18_32x32'], + ['osr_cifar50/cifar50_seed1.yml', 'resnet18_32x32'], + ['osr_tin20/tin20_seed1.yml', 'resnet18_64x64'], + ['osr_mnist4/mnist4_seed1.yml', 'lenet'], + ['mnist/mnist.yml', 'lenet'], +] + +for [dataset, network] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ --cpus-per-task=1 --ntasks-per-node=1 \ --kill-on-bad-exit=1 --job-name=openood \ -w SG-IDC1-10-51-2-75 \ python main.py \ --config configs/datasets/{dataset} \ configs/networks/{network}.yml \ configs/pipelines/train/train_mixup.yml \ configs/preprocessors/base_preprocessor.yml \ --network.pretrained False \ --optimizer.num_epochs 100 \ --num_workers 8 &") + os.system(command) diff --git a/OpenOOD/scripts/uncertainty/mixup/sweep_test.py b/OpenOOD/scripts/uncertainty/mixup/sweep_test.py new file mode 100644 index 0000000000000000000000000000000000000000..740354c3e5760f85c072360e7ac1ebdf69b2e101 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/mixup/sweep_test.py @@ -0,0 +1,33 @@ +# python scripts/uncertainty/mixup/sweep_test.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', + './results/cifar10_osr_resnet18_32x32_base_e100_lr0.1_default/best_epoch94_acc0.9773.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', + './results/cifar100_osr_resnet18_32x32_base_e100_lr0.1_default/best.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ --cpus-per-task=1 --ntasks-per-node=1 \ --kill-on-bad-exit=1 --job-name=openood \ python main.py \ --config configs/datasets/{dataset} \ configs/datasets/{ood_dataset} \ configs/networks/{network}.yml \ configs/pipelines/test/test_osr.yml \ configs/preprocessors/base_preprocessor.yml \ configs/postprocessors/msp.yml \ --network.pretrained True \ --network.checkpoint {pth} \ --num_workers 8 \ --merge_option merge &") + os.system(command) diff --git a/OpenOOD/scripts/uncertainty/pixmix/cifar100_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/pixmix/cifar100_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..c96ff9f009ca4e6a0bb3ddcaa7884489d1abcfa3 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/cifar100_test_ood_msp.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/cifar100_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood +
+#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_pixmix/s0/best.ckpt' \ + --mark pixmix + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_pixmix \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/pixmix/cifar100_train_pixmix.sh b/OpenOOD/scripts/uncertainty/pixmix/cifar100_train_pixmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..ef4a09f2b33fa99b5a038762ab365ba9dd285bc4 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/cifar100_train_pixmix.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/cifar100_train_pixmix.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/pixmix_preprocessor.yml \ + --num_workers 8 \ + --optimizer.num_epochs 100 \ + --mark pixmix \ + --seed 0 diff --git a/OpenOOD/scripts/uncertainty/pixmix/cifar10_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/pixmix/cifar10_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..b30402cbb205ee5fd03cb16cb649abdae3f480b2 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/cifar10_test_ood_msp.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/cifar10_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/msp.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_pixmix/s0/best.ckpt' \ + --mark pixmix + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_pixmix \ + --postprocessor msp \ + --save-score --save-csv
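+# note: --root points at the run directory; eval_ood.py is assumed to
+# average results over per-seed subfolders such as s0/ (cf. checkpoint above)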
diff --git a/OpenOOD/scripts/uncertainty/pixmix/cifar10_train_pixmix.sh b/OpenOOD/scripts/uncertainty/pixmix/cifar10_train_pixmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..63da442af6ed7316392166e31769d3e2bebea0b7 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/cifar10_train_pixmix.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/cifar10_train_pixmix.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/pixmix_preprocessor.yml \ + --num_workers 8 \ + --optimizer.num_epochs 100 \ + --mark pixmix \ + --seed 0 diff --git a/OpenOOD/scripts/uncertainty/pixmix/imagenet200_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/pixmix/imagenet200_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..4fc6c58fd5555e70fe4fc3ae2b3e76b65337e487 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/imagenet200_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/imagenet200_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_pixmix \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_pixmix \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/pixmix/imagenet200_train_pixmix.sh b/OpenOOD/scripts/uncertainty/pixmix/imagenet200_train_pixmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..7298f98a9496c23ef4545cc94bf1df4b91037d28 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/imagenet200_train_pixmix.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/imagenet200_train_pixmix.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/pixmix_preprocessor.yml \ + --preprocessor.preprocessor_args.aug_severity 1 \ + --preprocessor.preprocessor_args.beta 4 \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed ${SEED:-0} \ + --mark pixmix diff --git a/OpenOOD/scripts/uncertainty/pixmix/imagenet_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/pixmix/imagenet_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..39a4226ff6f02c8cb9beb71c54bb58c9cb9372b9 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/imagenet_test_ood_msp.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/imagenet_test_ood_msp.sh + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood
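+# note: 'tvsv1' in the paths below is assumed to denote torchvision V1
+# ResNet-50 weights; adjust --ckpt-path if your run is stored elsewhere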
+python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_tvsv1_base_pixmix/ckpt.pth \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_tvsv1_base_pixmix/ckpt.pth \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/pixmix/mnist_test_ood_pixmix.sh b/OpenOOD/scripts/uncertainty/pixmix/mnist_test_ood_pixmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..e5d3afcb02d9585afd1b41952762d27f3e509a74 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/mnist_test_ood_pixmix.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/mnist_test_ood_pixmix.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/mnist_lenet_base_e100_lr0.1_pixmix/best.ckpt' \ +--mark pixmix diff --git a/OpenOOD/scripts/uncertainty/pixmix/mnist_train_pixmix.sh b/OpenOOD/scripts/uncertainty/pixmix/mnist_train_pixmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..42f6c2a40bdb25a8c92dd33a95558fb70ebbe47d --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/mnist_train_pixmix.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/mnist_train_pixmix.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/networks/lenet.yml \ +configs/pipelines/train/baseline.yml \ +configs/preprocessors/pixmix_preprocessor.yml \ +--num_workers 0 \ +--optimizer.num_epochs 100 \ +--mark pixmix diff --git a/OpenOOD/scripts/uncertainty/pixmix/osr_mnist6_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/pixmix/osr_mnist6_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..339fc571c62b78bf4cb52de87e09d5bdb1590993 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/osr_mnist6_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/osr_mnist6_test_ood_msp.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +PYTHONPATH='.':$PYTHONPATH \ +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/msp.yml \ +--num_workers 8 \ +--network.checkpoint 'results/osr_mnist6_seed1_lenet_base_e100_lr0.1_pixmix/best.ckpt' \ +--mark osr_mnist6_pixmix diff --git a/OpenOOD/scripts/uncertainty/pixmix/osr_mnist6_train_pixmix.sh
b/OpenOOD/scripts/uncertainty/pixmix/osr_mnist6_train_pixmix.sh new file mode 100644 index 0000000000000000000000000000000000000000..c7eb4672261d620c0075cc78d1ffcde886b6475d --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/osr_mnist6_train_pixmix.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# sh scripts/uncertainty/pixmix/osr_mnist6_train_pixmix.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ + +CUDA_VISIBLE_DEVICES=1 python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/networks/lenet.yml \ +configs/pipelines/train/baseline.yml \ +configs/preprocessors/pixmix_preprocessor.yml \ +--dataset.train.batch_size 4096 \ +--num_workers 0 \ +--optimizer.num_epochs 100 \ +--mark pixmix \ +--merge_option merge diff --git a/OpenOOD/scripts/uncertainty/pixmix/sweep.py b/OpenOOD/scripts/uncertainty/pixmix/sweep.py new file mode 100644 index 0000000000000000000000000000000000000000..f14841090117f02d64ca29dc0bc6ab10d0e9c6e0 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/pixmix/sweep.py @@ -0,0 +1,23 @@ +# python scripts/uncertainty/pixmix/sweep.py +import os + +config = [ + ['osr_cifar6/cifar6_seed1.yml', 'resnet18_32x32', 'cifar10'], + ['osr_cifar50/cifar50_seed1.yml', 'resnet18_32x32', 'cifar100'], + ['osr_tin20/tin20_seed1.yml', 'resnet18_64x64', 'tin'], +] + +for [dataset, network, od] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/pixmix_preprocessor.yml \ + --optimizer.num_epochs 100 \ + --dataset.name {od}_osr \ + --num_workers 8") + os.system(command) diff --git a/OpenOOD/scripts/uncertainty/randaugment/cifar100_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/randaugment/cifar100_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..23eae553ae7b5408ae49ba837d0c2fef696fe0ba --- /dev/null +++ b/OpenOOD/scripts/uncertainty/randaugment/cifar100_test_ood_msp.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/randaugment/cifar100_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_randaugment-1-14 \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/randaugment/cifar100_train_randaugment.sh b/OpenOOD/scripts/uncertainty/randaugment/cifar100_train_randaugment.sh new file mode 100644 index 0000000000000000000000000000000000000000..c28d90f31d234f0a01aeca6888a873320f301af0 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/randaugment/cifar100_train_randaugment.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# sh scripts/uncertainty/randaugment/cifar100_train_randaugment.sh + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/randaugment_preprocessor.yml \ + --seed 0 \ + --mark 
randaugment-1-14 diff --git a/OpenOOD/scripts/uncertainty/randaugment/cifar10_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/randaugment/cifar10_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..9ba147b19c88b3c16d6fdff2225c9320d4a96031 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/randaugment/cifar10_test_ood_msp.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/randaugment/cifar10_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_randaugment-1-14 \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/randaugment/cifar10_train_randaugment.sh b/OpenOOD/scripts/uncertainty/randaugment/cifar10_train_randaugment.sh new file mode 100644 index 0000000000000000000000000000000000000000..b878d41aeaed1479eea65aac31ded1f79fecea29 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/randaugment/cifar10_train_randaugment.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# sh scripts/uncertainty/randaugment/cifar10_train_randaugment.sh + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/randaugment_preprocessor.yml \ + --seed 0 \ + --mark randaugment-1-14 diff --git a/OpenOOD/scripts/uncertainty/randaugment/imagenet200_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/randaugment/imagenet200_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..43d45e557c61e337b1ae5c08e5c970617afb68cc --- /dev/null +++ b/OpenOOD/scripts/uncertainty/randaugment/imagenet200_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/randaugment/imagenet200_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_randaugment-1-10 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_randaugment-1-10 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/randaugment/imagenet200_train_randaugment.sh b/OpenOOD/scripts/uncertainty/randaugment/imagenet200_train_randaugment.sh new file mode 100644 index 0000000000000000000000000000000000000000..35b35a0a59d9e8928af68a3f2e84bf103306e9b8 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/randaugment/imagenet200_train_randaugment.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# sh scripts/uncertainty/randaugment/imagenet200_train_randaugment.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/randaugment_preprocessor.yml \ + --preprocessor.m 10 \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 \ + --mark randaugment-1-10 diff --git 
a/OpenOOD/scripts/uncertainty/randaugment/imagenet_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/randaugment/imagenet_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..dff2b290e9043bbc992481b9cccaf76449bbab67 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/randaugment/imagenet_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/randaugment/imagenet_test_ood_msp.sh + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_base_e30_lr0.001_randaugment-2-9/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_base_e30_lr0.001_randaugment-2-9/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/randaugment/imagenet_train_randaugment.sh b/OpenOOD/scripts/uncertainty/randaugment/imagenet_train_randaugment.sh new file mode 100644 index 0000000000000000000000000000000000000000..e4388b6d027d1ab244b6b0500a62deec9015d469 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/randaugment/imagenet_train_randaugment.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# sh scripts/uncertainty/randaugment/imagenet_train_randaugment.sh + +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/randaugment_preprocessor.yml \ + --preprocessor.n 2 \ + --preprocessor.m 9 \ + --network.pretrained True \ + --network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 \ + --mark randaugment-2-9 diff --git a/OpenOOD/scripts/uncertainty/regmixup/cifar100_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/regmixup/cifar100_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..dbf37b3a14eb7586a7f25620e322e0acd6761490 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/regmixup/cifar100_test_ood_msp.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/regmixup/cifar100_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_regmixup_e100_lr0.1_alpha10_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/regmixup/cifar100_train_regmixup.sh b/OpenOOD/scripts/uncertainty/regmixup/cifar100_train_regmixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..06b84423f91bacbbcf1e9a91914056f2c8812e0b --- /dev/null +++ b/OpenOOD/scripts/uncertainty/regmixup/cifar100_train_regmixup.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# sh scripts/uncertainty/regmixup/cifar100_train_regmixup.sh + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_regmixup.yml \ + configs/preprocessors/base_preprocessor.yml \ 
+ --trainer.trainer_args.alpha 10 \ + --seed 0 diff --git a/OpenOOD/scripts/uncertainty/regmixup/cifar10_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/regmixup/cifar10_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..4933e86a417bf1bd9bce834ccb40b1c5407e8074 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/regmixup/cifar10_test_ood_msp.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/regmixup/cifar10_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_regmixup_e100_lr0.1_alpha20_default \ + --postprocessor msp \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/regmixup/cifar10_train_regmixup.sh b/OpenOOD/scripts/uncertainty/regmixup/cifar10_train_regmixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..ff838f675f292aac412dfec702a3b85c9d41d546 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/regmixup/cifar10_train_regmixup.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# sh scripts/uncertainty/regmixup/cifar10_train_regmixup.sh + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/train/train_regmixup.yml \ + configs/preprocessors/base_preprocessor.yml \ + --trainer.trainer_args.alpha 20 \ + --seed 0 diff --git a/OpenOOD/scripts/uncertainty/regmixup/imagenet200_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/regmixup/imagenet200_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..f8797971d7cb4904424ad62d3e7f90b3a5aa4a4f --- /dev/null +++ b/OpenOOD/scripts/uncertainty/regmixup/imagenet200_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/regmixup/imagenet200_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_regmixup_e90_lr0.1_alpha10_default \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_regmixup_e90_lr0.1_alpha10_default \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/regmixup/imagenet200_train_regmixup.sh b/OpenOOD/scripts/uncertainty/regmixup/imagenet200_train_regmixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..3a518e87270d9a8947fd640c25e9ffcd08b93b19 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/regmixup/imagenet200_train_regmixup.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# sh scripts/uncertainty/regmixup/imagenet200_train_regmixup.sh + +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/train_regmixup.yml \ + configs/preprocessors/base_preprocessor.yml \ + --trainer.trainer_args.alpha 10 \ + --optimizer.num_epochs 90 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git 
a/OpenOOD/scripts/uncertainty/regmixup/imagenet_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/regmixup/imagenet_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..33e41cd492e3f982daedbed49b0214ca5e037db2 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/regmixup/imagenet_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/regmixup/imagenet_test_ood_msp.sh + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best.ckpt \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/regmixup/imagenet_train_regmixup.sh b/OpenOOD/scripts/uncertainty/regmixup/imagenet_train_regmixup.sh new file mode 100644 index 0000000000000000000000000000000000000000..3b8d639556ba7087a29d8172fa78d114647c7267 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/regmixup/imagenet_train_regmixup.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/uncertainty/regmixup/imagenet_train_regmixup.sh + +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/train/train_regmixup.yml \ + configs/preprocessors/base_preprocessor.yml \ + --trainer.trainer_args.alpha 10 \ + --network.pretrained True \ + --network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \ + --optimizer.lr 0.001 \ + --optimizer.num_epochs 30 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 diff --git a/OpenOOD/scripts/uncertainty/rts/cifar100_test_ood_rts.sh b/OpenOOD/scripts/uncertainty/rts/cifar100_test_ood_rts.sh new file mode 100644 index 0000000000000000000000000000000000000000..493bd7f5b1d47037ae711688f3e1bf65a9a33818 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/rts/cifar100_test_ood_rts.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# sh scripts/uncertainty/rts/cifar100_test_ood_rts.sh + +# GPU=1 +# CPU=1 +# node=36 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/datasets/cifar100/cifar100_ood.yml \ +configs/networks/rts_net.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/rts.yml \ +--network.backbone.name resnet18_32x32 \ +--num_workers 8 \ +--network.checkpoint 'results/cifar100_rts_net_rts_e100_lr0.1_default/best_epoch89_acc0.7850.ckpt' diff --git a/OpenOOD/scripts/uncertainty/rts/cifar100_train_rts.sh b/OpenOOD/scripts/uncertainty/rts/cifar100_train_rts.sh new file mode 100644 index 0000000000000000000000000000000000000000..934b5a49ecad6cb61b8a22364559e49cbb6b70e7 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/rts/cifar100_train_rts.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# sh scripts/uncertainty/rts/cifar100_train_rts.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood +
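+# note: keep the PYTHONPATH assignment directly above the python command
+# below; if its backslash continuation runs into a comment line instead,
+# the assignment becomes a no-op and never reaches the python process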
+PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} \ +# -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/cifar100/cifar100.yml \ +configs/networks/rts_net.yml \ +configs/pipelines/train/baseline.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/rts.yml \ +--network.backbone.name resnet18_32x32 \ +--num_workers 8 \ +--trainer.name rts \ +--optimizer.num_epochs 100 \ diff --git a/OpenOOD/scripts/uncertainty/styleaug/imagenet200_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/styleaug/imagenet200_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..ecee26164250d0a42f63b2c6a9106709418e055f --- /dev/null +++ b/OpenOOD/scripts/uncertainty/styleaug/imagenet200_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/deepaugment/imagenet200_test_ood_msp.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e45_lr0.1_stylized \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e45_lr0.1_stylized \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/styleaug/imagenet200_train_styleaug.sh b/OpenOOD/scripts/uncertainty/styleaug/imagenet200_train_styleaug.sh new file mode 100644 index 0000000000000000000000000000000000000000..36ead75bbf9a0f8762f848379d31a166ef568ceb --- /dev/null +++ b/OpenOOD/scripts/uncertainty/styleaug/imagenet200_train_styleaug.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# sh scripts/uncertainty/styleaug/imagenet200_train_styleaug.sh + +# the model sees twice the data as the baseline +# so only trains for 90/2=45 epochs +python main.py \ + --config configs/datasets/imagenet200/imagenet200.yml \ + configs/networks/resnet18_224x224.yml \ + configs/pipelines/train/baseline.yml \ + configs/preprocessors/base_preprocessor.yml \ + --dataset.train.imglist_pth ./data/benchmark_imglist/imagenet200/train_imagenet200_stylized.txt \ + --optimizer.num_epochs 45 \ + --dataset.train.batch_size 128 \ + --num_gpus 2 --num_workers 16 \ + --merge_option merge \ + --seed 0 \ + --mark stylized diff --git a/OpenOOD/scripts/uncertainty/styleaug/imagenet_test_ood_msp.sh b/OpenOOD/scripts/uncertainty/styleaug/imagenet_test_ood_msp.sh new file mode 100644 index 0000000000000000000000000000000000000000..06163d31f6dce76f3a804f47e885b81942584f08 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/styleaug/imagenet_test_ood_msp.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/styleaug/imagenet_test_ood_msp.sh + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50 +# ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path ./results/imagenet_resnet50_tvsv1_base_stylized/ckpt.pth \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --ckpt-path 
./results/imagenet_resnet50_tvsv1_base_stylized/ckpt.pth \ + --arch resnet50 \ + --postprocessor msp \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/temp_scaling/0_tempscaling.sh b/OpenOOD/scripts/uncertainty/temp_scaling/0_tempscaling.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f8eae2c3005528e3ba0cf14f7147238a497f4e9 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/temp_scaling/0_tempscaling.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# sh scripts/d_uncertainty/0_tempscaling.sh + +# mnist +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +# PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +# python main.py \ +# --config configs/datasets/digits/mnist.yml \ +# configs/datasets/digits/mnist_fsood.yml \ +# configs/networks/lenet.yml \ +# configs/pipelines/test/test_fsood.yml \ +# configs/postprocessors/temperature_scaling.yml \ +# configs/preprocessors/base_preprocessor.yml \ +# --num_workers 8 \ +# --network.checkpoint ./results/mnist_lenet_base_e100_lr0.1/best_epoch86_acc0.9920.ckpt \ +# --mark 0 \ +# --exp_name mnist_lenet_base_e100_lr0.1_test_fsood_temperature_scaling + + +# cifar10 +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ +--config configs/datasets/objects/cifar10.yml \ +configs/datasets/objects/cifar10_fsood.yml \ +configs/networks/resnet18_32x32.yml \ +configs/pipelines/test/test_fsood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/temperature_scaling.yml \ +--num_workers 8 \ +--mark 0 \ +--network.checkpoint ./results/cifar10_resnet18_32x32_base_e100_lr0.1/best.ckpt \ +--exp_name cifar10_resnet18_32x32_base_e100_lr0.1_test_fsood_temperature_scaling diff --git a/OpenOOD/scripts/uncertainty/temp_scaling/cifar100_test_ood_tempscaling.sh b/OpenOOD/scripts/uncertainty/temp_scaling/cifar100_test_ood_tempscaling.sh new file mode 100644 index 0000000000000000000000000000000000000000..6e8b02e7bf1d3c5de9dbecada145d032a7b292e5 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/temp_scaling/cifar100_test_ood_tempscaling.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/uncertainty/temp_scaling/cifar100_test_ood_tempscaling.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar100/cifar100.yml \ + configs/datasets/cifar100/cifar100_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/temp_scaling.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar100 \ + --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \ + 
--postprocessor temp_scaling \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/temp_scaling/cifar10_test_ood_tempscaling.sh b/OpenOOD/scripts/uncertainty/temp_scaling/cifar10_test_ood_tempscaling.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f13f29988634ecbf3654d6e89b7d473b9620f13 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/temp_scaling/cifar10_test_ood_tempscaling.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# sh scripts/uncertainty/temp_scaling/cifar10_test_ood_tempscaling.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ + --config configs/datasets/cifar10/cifar10.yml \ + configs/datasets/cifar10/cifar10_ood.yml \ + configs/networks/resnet18_32x32.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/temp_scaling.yml \ + --num_workers 8 \ + --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \ + --mark 0 + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs +python scripts/eval_ood.py \ + --id-data cifar10 \ + --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \ + --postprocessor temp_scaling \ + --save-score --save-csv diff --git a/OpenOOD/scripts/uncertainty/temp_scaling/imagenet200_test_ood_tempscaling.sh b/OpenOOD/scripts/uncertainty/temp_scaling/imagenet200_test_ood_tempscaling.sh new file mode 100644 index 0000000000000000000000000000000000000000..82a84e4a1c43c000d8382399123f5c81b02428a3 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/temp_scaling/imagenet200_test_ood_tempscaling.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/ood/temp_scaling/imagenet200_test_ood_tempscaling.sh + +############################################ +# alternatively, we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood.py +# especially if you want to get results from +# multiple runs + +# ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor temp_scaling \ + --save-score --save-csv #--fsood + +# full-spectrum ood +python scripts/eval_ood.py \ + --id-data imagenet200 \ + --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \ + --postprocessor temp_scaling \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/temp_scaling/imagenet_test_ood_tempscaling.sh b/OpenOOD/scripts/uncertainty/temp_scaling/imagenet_test_ood_tempscaling.sh new file mode 100644 index 0000000000000000000000000000000000000000..ee62ae26326a87f1ec758576be8eac1ae100c812 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/temp_scaling/imagenet_test_ood_tempscaling.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# sh scripts/ood/temp_scaling/imagenet_test_ood_tempscaling.sh + +GPU=1 +CPU=1 +node=73 +jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ +python main.py \ + --config configs/datasets/imagenet/imagenet.yml \ + 
configs/datasets/imagenet/imagenet_ood.yml \ + configs/networks/resnet50.yml \ + configs/pipelines/test/test_ood.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/temp_scaling.yml \ + --num_workers 10 \ + --ood_dataset.image_size 256 \ + --dataset.test.batch_size 256 \ + --dataset.val.batch_size 256 \ + --network.pretrained True \ + --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \ + --merge_option merge + +############################################ +# we recommend using the +# new unified, easy-to-use evaluator with +# the example script scripts/eval_ood_imagenet.py + +# available architectures: +# resnet50, swin-t, vit-b-16 +# ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor temp_scaling \ + --save-score --save-csv #--fsood + + +# full-spectrum ood +python scripts/eval_ood_imagenet.py \ + --tvs-pretrained \ + --arch resnet50 \ + --postprocessor temp_scaling \ + --save-score --save-csv --fsood diff --git a/OpenOOD/scripts/uncertainty/temp_scaling/mnist_test_ood_tempscaling.sh b/OpenOOD/scripts/uncertainty/temp_scaling/mnist_test_ood_tempscaling.sh new file mode 100644 index 0000000000000000000000000000000000000000..8a26ef92dba0fce250a445a791003e8151a18e08 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/temp_scaling/mnist_test_ood_tempscaling.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/temp_scaling/mnist_test_ood_tempscaling.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/mnist/mnist.yml \ +configs/datasets/mnist/mnist_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_ood.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/temp_scaling.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/uncertainty/temp_scaling/osr_mnist6_test_ood_tempscaling.sh b/OpenOOD/scripts/uncertainty/temp_scaling/osr_mnist6_test_ood_tempscaling.sh new file mode 100644 index 0000000000000000000000000000000000000000..2396a6aa4ebe9a03c3a5d7c546f3ef07e69f8ae9 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/temp_scaling/osr_mnist6_test_ood_tempscaling.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# sh scripts/uncertainty/temp_scaling/osr_mnist6_test_ood_tempscaling.sh + +# GPU=1 +# CPU=1 +# node=73 +# jobname=openood + +PYTHONPATH='.':$PYTHONPATH \ +# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \ +# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \ +# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \ + +python main.py \ +--config configs/datasets/osr_mnist6/mnist6_seed1.yml \ +configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \ +configs/networks/lenet.yml \ +configs/pipelines/test/test_osr.yml \ +configs/preprocessors/base_preprocessor.yml \ +configs/postprocessors/temp_scaling.yml \ +--num_workers 8 \ +--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \ +--mark 0 diff --git a/OpenOOD/scripts/uncertainty/temp_scaling/sweep_osr.py b/OpenOOD/scripts/uncertainty/temp_scaling/sweep_osr.py new file mode 100644 index 0000000000000000000000000000000000000000..00a0e636a806a151f446491756374cb5ef641ab5 --- /dev/null +++ b/OpenOOD/scripts/uncertainty/temp_scaling/sweep_osr.py @@ -0,0 +1,38 @@ +# 
python scripts/uncertainty/temp_scaling/sweep_osr.py +import os + +config = [ + [ + 'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt' + ], + [ + 'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml', + 'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt' + ], + [ + 'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', + 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt' + ], + [ + 'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', + 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt' + ], +] + +for [dataset, ood_dataset, network, pth] in config: + command = (f"PYTHONPATH='.':$PYTHONPATH \ + srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \ + --cpus-per-task=1 --ntasks-per-node=1 \ + --kill-on-bad-exit=1 --job-name=openood \ + python main.py \ + --config configs/datasets/{dataset} \ + configs/datasets/{ood_dataset} \ + configs/networks/{network}.yml \ + configs/pipelines/test/test_osr.yml \ + configs/preprocessors/base_preprocessor.yml \ + configs/postprocessors/temp_scaling.yml \ + --network.checkpoint {pth} \ + --num_workers 8 \ + --merge_option merge &") + os.system(command) diff --git a/OpenOOD/setup.py b/OpenOOD/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..369369342bae5bd533fc11cb7b8834cd3ac2fc48 --- /dev/null +++ b/OpenOOD/setup.py @@ -0,0 +1,29 @@ +import setuptools + +with open('README.md', 'r', encoding='utf-8') as fh: + long_description = fh.read() + +setuptools.setup( + name='openood', + version='1.5', + author='openood dev team', + author_email='jingkang001@e.ntu.edu.sg', + description= + 'This package provides a unified test platform for Out-of-Distribution detection.', + long_description=long_description, + long_description_content_type='text/markdown', + url='https://github.com/Jingkang50/OpenOOD', + packages=setuptools.find_packages(), + install_requires=[ + 'torch>=1.13.1', 'torchvision>=0.13', 'scikit-learn', 'json5', + 'matplotlib', 'scipy', 'tqdm', 'pyyaml>=5.4.1', 'pre-commit', + 'opencv-python>=4.4.0.46', 'imgaug>=0.4.0', 'pandas', 'diffdist>=0.1', + 'Cython>=0.29.30', 'faiss-gpu>=1.7.2', 'gdown>=4.7.1', 'libmr>=0.1.9' + ], + classifiers=[ + 'Programming Language :: Python :: 3', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + ], + include_package_data=True, +) diff --git a/OpenOOD/tools/plot/tsne_tools.py b/OpenOOD/tools/plot/tsne_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..49607b07d90d8539fa9c44e9b573d4fd1c5d72b2 --- /dev/null +++ b/OpenOOD/tools/plot/tsne_tools.py @@ -0,0 +1,61 @@ +# srun -p dsta --mpi=pmi2 --cpus-per-task=1 +# --kill-on-bad-exit=1 --job-name=tsne -w SG-IDC1-10-51-2-73 +# python compute_tsne.py + +import os +import time + +import numpy as np +from sklearn.decomposition import PCA +from sklearn.manifold import TSNE + +l2_normalize = lambda x: x / np.linalg.norm(x, axis=1, keepdims=True) + + +def tsne_compute(x, n_components=50): + start_time = time.time() + if n_components < x.shape[1]: + pca = PCA(n_components=50) + x = pca.fit_transform(x) + tsne = TSNE(n_components=2, verbose=0, perplexity=40, n_iter=2000) + tsne_pos = tsne.fit_transform(x) + + hours, rem = divmod(time.time() - start_time, 3600) + minutes, seconds = divmod(rem, 60) + print('TSNE Computation Duration: {:0>2}:{:0>2}:{:05.2f}'.format( + int(hours), int(minutes), seconds), + flush=True) + + return tsne_pos + + +dataset_list = [ + 'mnist', 
'usps', 'svhn', 'notmnist', 'fashionmnist', 'texture', 'cifar10', + 'tin' +] +dirname = '/mnt/lustre/jkyang/FSOOD22/report/test/test_tsne' +sample_rate = 0.1 + +highfeat_list, featstat_list, idx_list = [], [], [] +for idx, dataset in enumerate(dataset_list): + file_name = os.path.join(dirname, f'{dataset}.npz') + highfeat_sublist = np.load(file_name)['highfeat_list'] + featstat_sublist = np.load(file_name)['featstat_list'] + # label_list = np.load(file_name)['label_list'] + # selection: + num_samples = len(highfeat_sublist) + index_list = np.arange(num_samples) + index_select = np.random.choice(index_list, + int(sample_rate * num_samples), + replace=False) + highfeat_list.extend(highfeat_sublist[index_select]) + featstat_list.extend(featstat_sublist[index_select]) + idx_list.extend(idx * np.ones(len(index_select))) + +highfeat_list, featstat_list, index_list = np.array(highfeat_list), np.array( + featstat_list), np.array(idx_list) +tsne_pos_highfeat = tsne_compute(highfeat_list) +tsne_pos_lowfeat = tsne_compute(featstat_list) +np.save(os.path.join(dirname, 'tsne_pos_highfeat'), tsne_pos_highfeat) +np.save(os.path.join(dirname, 'tsne_pos_lowfeat'), tsne_pos_lowfeat) +np.save(os.path.join(dirname, 'idx'), idx_list) diff --git a/OpenOOD/tools/report/plot.ipynb b/OpenOOD/tools/report/plot.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..cd82bb15dec1a021cd564080d54d2416e238b9a3 --- /dev/null +++ b/OpenOOD/tools/report/plot.ipynb @@ -0,0 +1,684 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib.pyplot import figure\n", + "from matplotlib.ticker import PercentFormatter" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "id_datasets = ['cifar10']\n", + "csid_datasets = ['cifar10c', 'imagenet10']\n", + "nearood_datasets = ['mnist', 'fashionmnist']\n", + "farood_datasets = ['texture', 'cifar10', 'tin', 'cifar100c']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "id_datasets = ['mnist']\n", + "csid_datasets = ['usps', 'svhn']\n", + "nearood_datasets = ['notmnist', 'fashionmnist']\n", + "farood_datasets = ['texture', 'cifar10', 'tin', 'places365']" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Train ID" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "def plot_spectrum(method, size, use_log=True):\n", + " id_score_list = []\n", + " # dirname = f'./mnist_0408/mnist_lenet_test_ood_fsood_{method}_0331/scores/'\n", + " dirname = f'cifar10_resnet18_32x32_test_ood_fsood_{method}_0331/scores/'\n", + " for id_dataset in id_datasets:\n", + " feature_dict = np.load(f'{dirname}/{id_dataset}.npz')\n", + " id_score_list.extend(feature_dict['conf'])\n", + " id_score_list = np.array(id_score_list)\n", + " if use_log: id_score_list = np.log(id_score_list)\n", + " \n", + " csid_score_list = []\n", + " for csid_dataset in csid_datasets:\n", + " feature_dict = np.load(f'{dirname}/{csid_dataset}.npz')\n", + " csid_score_list.extend(feature_dict['conf'])\n", + " csid_score_list = np.array(csid_score_list)\n", + " if use_log: csid_score_list = np.log(csid_score_list)\n", + " \n", + " nearood_score_list = []\n", + " for nearood_dataset in nearood_datasets:\n", + " feature_dict = np.load(f'{dirname}/{nearood_dataset}.npz')\n", 
+ " nearood_score_list.extend(feature_dict['conf'])\n", + " nearood_score_list = np.array(nearood_score_list)\n", + " if use_log: nearood_score_list = np.log(nearood_score_list)\n", + " \n", + " farood_score_list = []\n", + " for farood_dataset in farood_datasets:\n", + " feature_dict = np.load(f'{dirname}/{farood_dataset}.npz')\n", + " farood_score_list.extend(feature_dict['conf'])\n", + " farood_score_list = np.array(farood_score_list)\n", + " if use_log: farood_score_list = np.log(farood_score_list)\n", + " \n", + " font = {'family': \"Times New Roman\", 'weight' : 'bold', 'size' : 15}\n", + " plt.rc('font', **font)\n", + " plt.figure(figsize=(8, 3), dpi=200)\n", + "\n", + " n_bins = 500\n", + "\n", + " figure(figsize=(10, 3), dpi=80)\n", + " plt.hist(farood_score_list, n_bins, density=True, \n", + " weights=np.ones(len(farood_score_list)) / len(farood_score_list), \n", + " facecolor='#FFC690', alpha=0.9)\n", + "\n", + " plt.hist(nearood_score_list, n_bins, density=True, \n", + " weights=np.ones(len(nearood_score_list)) / len(nearood_score_list), \n", + " facecolor='#FFDEBF', alpha=0.8)\n", + "\n", + " plt.hist(csid_score_list, n_bins, density=True, \n", + " weights=np.ones(len(csid_score_list)) / len(csid_score_list), \n", + " facecolor='#BFEBFF', alpha=0.7)\n", + "\n", + " plt.hist(id_score_list, n_bins, density=True, \n", + " weights=np.ones(len(id_score_list)) / len(id_score_list), \n", + " facecolor='#90B1C0', alpha=0.6)\n", + " \n", + " plt.xlim(size[0], size[1])\n", + " plt.ylim(size[2], size[3])\n", + " # plt.axis('off')\n", + "\n", + " plt.savefig(f'./{method}.pdf')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## PLOT" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqcAAADlCAYAAABu6l5KAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAxOAAAMTgF/d4wjAAAeo0lEQVR4nO3df7Bc5X3f8c/XutGFSjY2xAaD4kg2xWCMSRVoDRMMiRwZI0d4LJI6jVOrjdO4TtMppMmMwT9wSjxumlFLkvEwdTymTcaehHGC+RFwbGEZOZgB4VojGVRhw/UgWSDQjXAR6F5JfPvHOXvvs3vP2XOec3fvPrv7fs3s8Ow5z3nOsw9H937uc36subsAAACAFLxi0B0AAAAAWginAAAASAbhFAAAAMkgnAIAACAZhFMAAAAkg3AKAACAZBBOAQAAkAzCKQAAAJIRFU7NbNLMrjezR83sqJkdMrPbzWxtze3fYWa3mdn3zezHZnbMzJ42s7vN7MpmHwEAAACjwup+Q5SZTUi6V9K6gtUzkja4+9aKNj4m6b90qfJr7v7FWh0CAADAyImZOf2I5oPpbkmbJN2Uv5+UdKuZTVa0sV/SzZJ+NW/r1yU9Fqz/nYj+AAAAYMTEzJw+Kum8/O0l7v5gvvxeSe/Kl1/j7l+O6oDZeyX9bf72e+7+1pjtAQAAMDpqzZya2amaD6bHJD0crH4gKF9Wd8dmtszM1kj6YLD4G3W3BwAAwOiZqFlvdVA+5O4ngvcHg/KaOo2Z2dOSTg8WHZf0JUkfLal/naTrWu+XLVt21hlnnFFnVwAAABiA/fv3z7p71SWfC9QNpyuC8mzHutmSejFO5C8rWunuWyRtab1ftWqV79u3r+GuAAAA0G9m9myT7eqG0yNBuTMBLy+p181GZUH2jZKulXS+pM2SXinpmpptAAAAYMTUDadTQfk0M5tw9+P5+/D8+pN1GnP3h/LiN8zsPklP5O/fZ2YnufvRmv0CAADACKl1Q5S7T2v+kU8Tki4OVl8SlLd3a8fMTi5qPqwi6VV1+gQAAIDRE/Oc01uC8ufM7H1mdpOk9fmyfZLukiQz22Zmnr9WB9vtN7M/MbMPmNk6M9ss6c5g/VOSGl2fAAAAgOFX97S+JH1W2bWi65RdIxo+z3RG0mZ3n6lo4zUqf9D+MUn/wes+eBUAAAAjp/bMaX6N6QZJN0jaoyyQTku6Q9KlVV9dmvuUpG9KOqAsjL4k6XFJn5d0kbvfEdV7AAAAjJTa3xCVEh4lBQAAkDYz2+/uq2K3i7nmFAAAAOgrwikAAACSQTgFAABAMginAAAASAbhFAAAAMkgnAIAACAZhFMAAAAkg3AKAACAZBBOAQAAkAzCKQAAAJJBOAUAAEAyCKcAAABIBuEUAAAAySCcAgAAIBmEUwAAACSDcAoAAIBkEE4BAACQDMIpAAAAkkE4BQAAQDIIpwAAAEgG4RQAAADJIJwCAAAgGYRTAAAAJINwCgAAgGQQTgEAAJAMwikAAACSQTgFAABAMiYG3QEAAABktu7cO1ded+E5A+zJ4DBzCgAAMABbd+5tC6PIEE4BAACQDE7rAwAADBCzp+2YOQUAAEAyCKcAAABIBuEUAAAAySCcAgAAIBmEUwAAACSDcAoAAIBk8CgpAACAJcJjo6oxcwoAAIBkEE4BAACQDMIpAAAAksE1pwAAAH3EdaZxmDkFAABAMginAAAASAbhFAAAAMkgnAIAACAZUeHUzCbN7Hoze9TMjprZITO73czW1tz+cjO72cx2mNnTZjZrZgfM7K/M7G3NPgIAAABGRe279c1sQtLdktYFiyclXS3pSjPb4O5bK5r5qKR3dSw7Q9KvSNpoZr/g7t+u2ycAAACMlpiZ049oPpjulrRJ0k35+0lJt5rZZI12npB0vaT1kj4k6UC+/CRJn4noDwAAAEZMzHNOPxyUf9PdH5T0N2Z2sbLZ0FWS3iPpy13a+CNJ97v78dYCM3tO0u3524sj+gMAAIARU2vm1MxOlXRe/vaYpIeD1Q8E5cu6tePu94XBNPd4UD5Spz8AAAAYTXVP668Oyofc/UTw/mBQXtOgD5uC8j1FFczsOjPb13q98MILDXYDAACA1NUNpyuC8mzHutmSepXM7CpJH8vfTkv6eFE9d9/i7qtar5UrV8bsBgAAAEOibjgNT7d33vS0vKReV2a2SdLf5tu/IOk97v7DutsDAABg9NS9IWoqKJ9mZhPBtaNnBOuerNOYmX1Q0uclLZN0WNJVPEIKAACMiq079w66C0Or1sypu09Leix/O6H2u+ovCcrbq9oys9+W9AVlwfSgpCsIpgAAAJDinnN6S1D+nJm9z8xuUva8UknaJ+kuSTKzbWbm+Wt1ayMzu1bSn0kySTPKHsr/SjP7udZrEZ8FAAAAQy7mOaeflbRR2YP4z1f780xnJG1295mKNq4OypPKTu13sog+AQAAYITUnjnNrzHdIOkGSXuUBdJpSXdIurTGV5cCAAAAXcXMnCqfGf10/upW74qY5QAAAIAUd80pAAAA0FeEUwAAACSDcAoAAIBkEE4BAACQDMIpAAAAkkE4BQAAQDKiHiUFAACAclt37h10F4YeM6cAAABIBjOnAAAACQpnYdddeM4Ae7K0mDkFAABAMginAAAASAbhFAAAAMkgnAIAACAZhFMAAAAkg3AKAACAZBBOAQAAkAzCKQAAAJJBOAUAAEAyCKcAAABIBuEUAAAAySCcAgAAIBmEUwAAACRjYtAdAAAAGDZbd+6dK6+78JwB9mT0EE4BAABqCAMp+odwCgAAUIJAuvS45hQAAADJIJwCAAAgGYRTAAAAJINrTgEAABaB61J7i5lTAAAAJINwCgAAgGQQTgEAAJAMwikAAACSQTgFAABAMginAAAASAbhFAAAAMkgnAIAACAZhFMAAAAkg3AKAACAZPD1pQAAAB34StLBYeYUAAAAySCcAgAAIBmEUwAAACSDcAoAAIBkEE4BAACQDO7WBwAAYyu8K3/dhecMsCdoYeYUAAAAyYgKp2Y2aWbXm9mjZnbUzA6Z2e1mtrbm9qvNbIuZPWhmM2bm+evGRr0HAADoka079869MDi1T+ub2YSkuyWtCxZPSrpa0pVmtsHdt1Y08zOSro3tJAAAAMZDzMzpRzQfTHdL2iTppvz9pKRbzWyyoo0jkr4m6VOSvhKxbwAAAIyBmHD64aD8m+7+N+7+cUlfzZetkvSebg24+9fcfb273yhpT1RPAQAAMPJqhVMzO1XSefnbY5IeDlY/EJQv61G/AAAAMIbqXnO6OigfcvcTwfuDQXnNontUwMyuk3Rd6/0pp5zSj90AAIAxwA1Paat7Wn9FUJ7tWDdbUq9n3H2Lu69qvVauXNmP3QAAAGDA6obTI0G586an5SX1AAAAgCh1w+lUUD4tf6xUyxlB+clF9wgAAABjq1Y4dfdpSY/lbyckX
RysviQob+9RvwAAADCGaj+EX9Itkm7Oy58zs09IWitpfb5sn6S7JMnMtkm6PF++xt2n8uWvDZa/OWj7LWZ2TV7+prs/G9EvAAAw5sKbnNZdeE5lnWFT5/ONiphw+llJG5U9iP98SV8O1s1I2uzuMxVtnC/ptoLlv5y/JOnnJW2L6BcAAABGRO2H8Lv7cUkbJN2g7AH6M5KmJd0h6dIaX10KAAAAdBUzc6p8ZvTT+atbvStKlm+TZDH7BAAAwPiICqcAAADDZJivMx1XtU/rAwAAAP1GOAUAAEAyCKcAAABIBuEUAAAAyeCGKAAAkLyiG5tG8WH7YOYUAAAACSGcAgAAIBmEUwAAACSDcAoAAIBkcEMUAAAYStz4NJqYOQUAAEAymDkFAABYpOnZ+fKpy2tu9Oye7L+vPXfhss7lY4RwCgAAMEhhIAXhFAAA9Fd4bWjZg/MHrTXzWXvWsymCaCXCKQAASMYwBNloeSCdPnmVdPIqnfrSvgF3KG2EUwAAsGSGNXyWXVMaLq+qi3oIpwAAABGKAmnXuiev6ltfRhHhFAAADNzIPbN09oVB92BoEU4BAECS+hFYq065c0p+8AinAABgJMWcfh+U6eCUPzdKZQinAACg5+rMenbWSW3WslG45XT+ohFOAQDAogzrHfhIE+EUAAAMhV7PrA7qtP903bv3x/SrTAmnAAAgadGPbsJQI5wCAICh1tdAWuca0uUr+9iB8UM4BQAA2vX8fPmCU/qzj6LT8k2DJTOko4twCgAA2rSCamxInZ6Vbnt4bxJ32i8p7tDvKcIpAADoauvOvQtmKs89Z/6u/G6zmGXrqmY+mRkdX4RTAADGyFKcvh8JzIYODOEUAIBY+3fMl8+6aHD9WKQwqI6thEIo3xaVIZwCg7Ljz+fLF32ofFkv2l1qKfQBi1MnfLXqxISzXrQbtlGkaVgsa7fVXtV+Y/cR9rPiM+965vBc+YLTX91o12EQ3bN3/qH54en5cLkkHexy7Whn3RCn5LEYhFOgrjBwharCV9Og1nR/vdDad+y+ivpc9fmr1qc8Q1XVt34FvDr9KdKL0Dao/wcxwbAqAIYafp5dE2fPlS8oqxMEysK6Rf2JOGba+nD8+5X9lKQ9Tz4lLZucX3BiRjqa9/OkV7cvlzQ9G9QN5etLLSvZDqiBcIrhFhOiioJTP4NeVVCrs3wptfpwyqrsFS7rFI5bk77HzkC9+Nz8vjr+n+16XnO/XNt+QZ91Uffjo2qWrLU/KRuPf/KT9fpa1m4vg2yoF7OWTYNfjKog1st99XofBWGwVhtl9QsUtl0QbsvqlvatSFmwPHp44bqqEFpnH0VBtWp9vyV0Kh8LEU4xHJqGt6rtejE7udTBsmp/rWAptc/KVYXzcLsqj98rSdq1Yq105kZJ0gU/uqO4recjrpuqCgqnrFpYZ+Js6UR+DrEVYlt9bPUj729bf+oEzpgxiQhSRY/pqTMT16b1WR+/t35wDvUi+PWjrQRVBb+oYBhpz5NPzZXPXfNTfd9H3zUNu3XaffnlrOwn2h+KTxAdOoRT9FYvr6OMCX39vMaxbhgMQ1hZqCmos2vyLXOLLjj498VthIGqqt3crhVr59/86KB05kZdcOQ77XXDz1bQ9q5W8DzyneL91qnb+pwr1mb9kHRBGCLD9cuyC9zKTlG2abWx4g3VdQv609ZGYFerj8HnWNC3gu1KA2JYt1Xn6GHpxKx2HZFkef9PzM7tQ/t3FG9XtN8IXU8Dt9puEnR7oCrg1TomCtoKtyvaR9X6Xuu2j6YhsWq7foXa2qoCaZ3AumwyC5m2rH35K15RXJ9AOtQIpyhWFvaaBsam+2uiFzOZeRu7Xrdeet16SV2C41IoCo55+KwTHIvaaguvgej2FqmtH0UzoJ26rAvbKvocZevLxqJNVd/y5WUhO1we7cXnsu3zIF71/2guAJ2YzerWDJxlfSwKcDFhsRfqXF9ZtR0S0ZrhlMrD5WJ0Blgpm00dds/uCd6cU1ptFBBOUa1fp60bXH+5Kw+KUkdYrKjbZuKkbPvOX/D5jGLbdnldSV0DoiSpIPgsCENFoTbYR8xMZb/UCmpLqGl/Gof3iv3GBNld9ob2md0TFbcwl21X1YeY0BvuI9yuaH9hIO8IuHVmYatmw7sGx9a+Cz5bdFANx72jvaUKr70+PR8zy7qkp+1DYQhtul0rvJ6YKQ6ddfdRtK00GqF1BBFOh13MjGMvZkNjFJ3uDq/967ZNp7CNVpgL6hYGkTBYStLxo21vF/yCrwgdMWGnlwGvVlsVs6FRbS1yu6rZyzrtLcX4pRbCF6Uq9MZsV6etjjph0K1113hr+/BShjqK+hYGYan57HSMLsG5Z2KCXatuOAvZ69nJon0UrW/abtP1izWks6xbd2aP8Vp34WjOoBJOB2lUngXZ9OaXsjY6ZKczFwaJ6HDRGVYbWuqA18u6Sy3lvo2LRV1OELbRUhZeyy57iAnODcLyoo6xorbLxiqou2fqgCTp3NWvr9xuzw9+WL8/iw1ivZypLGuv36fkU5DILGvrgfzj+DB+wukoWexjlUJV11SWhdCi7cpOhxfMRC42zBCGgAJNZ1YbiP43GNG3ViiUOoJhRXtt273pp+v3raSNwvXhafs3nF67bkygbHx6vt+zj+OiFVqHYGZ12BFOy/T6m3p6ue+qdosemyPF3f1doO10bdGyqps0xuH0KjCkCmcDh0St0NpS87rf2P0uiaUImQTZaonMrI4ywmkqKh7p0xY4KwJl27WRZdcidgmEdWYyi5YTMoGFooJTRRvDGBz7pZfBsBf/j8raKzQqAXBUPkevhKGVoLooox9Oe31dZ9HDpg98d75cFhybniavuoEot+Q34ABjoNehJQUxn6np5+/FPhYbyMsCYkyoXfKZUaBA69pTaXyuPx3+cLoUD3gPA2nRMw4rrqkMlZ767sNzMwmZaRv14NMyjJ9tKUJJUfiqOiaWYoavF6GuyX57vY+mUugDRsCQPgUgFcMZTo8dKQ6XYYgsms0sm4WsCoZF3yJT8UzLXiBcAvUsRdBf6j8mKm/AGcE/borEhkXCJZLFaf/aosKpmU1K+l1JH5D0RklHJG2X9AfuXutJ12b2Kkk3SLpG0ipJhyV9TdIn3f0H9TqyrPgZmgVfidhS62HoZXUrNAmRBM/RNMyBYVDhq87M4WJPE/c6sMTMcC4FAhmAUVI7nJrZhKS7Ja0LFk9KulrSlWa2wd23VrTxKmVh9m3B4tdJ+jVJV5nZ5e6+q26fJFV+a89iv8IwNqhWIZSOt5jZsNBir9uLaatbP2LaqNtWU3XaHdTp+aq6VctGyah/PqARZlG7MnevV9HsP0q6OX+7W9InJf0zSR/Ll+2TdLa7z3RpY4uka/O390vaIundkn4rX/awu//zqr6c/voz/euPfLdWv9HMKJ4mjelD1exbVQAs224pNA2nMf2s8/kBADU0DKenvrRP6965sced6S0z2+/u0TfVxITTRyWdl7+9xN0fzJffK+ld+fJr3P3LJdsvl/SMpFdLcklnufsBMzNJj0o6N696
kbs/0q0voxJOe3EKs196cWNG1YxSzOwcwQkAMPIiguooh9Nap/XN7FTNB9Njkh4OVj+g+XB6maTCcCrprcqCqSRNufsBSXJ3N7Nvaz6cXiapazhtqmng6uWjV2LvUk0hXC32xoymn2GptwMAYKD4FipJ9a85XR2UD7m3jdrBoLymZhvPdKzr2oaZXSfpumDRy28763QSSLyVkl4YdCeGDGPWDOMWjzFrhnGLx5g1w7jFO6PJRnXD6Yqg3PndbrMl9XrWhrtvUXZ9qiTJzPY1mSYed4xbPMasGcYtHmPWDOMWjzFrhnGLZ2aNvjXgFTXrHQnKkx3rlpfU60cbAAAAGGF1w+lUUD4tf6xUSzhl+2TNNk7vWFe3DQAAAIywWuHU3aclPZa/nZB0cbD6kqC8vUszuyU9n5d/2szOkqT8bv2312yjZUt1FRRg3OIxZs0wbvEYs2YYt3iMWTOMW7xGY9b0Oaffk/QJSWuVfduTFDzn1My2Sbo8X77G3afyNsLnnG6X9MeSNkj6d/myHe4eBl8AAACMkZhwOiHpXrV/Q1TLjKS5b4jqEk6LviGq5bCkd0R/QxQAAABGRt1rTuXux5XNct4gaY+yQDot6Q5Jl1Z9dWnexo+VPcf0vym7tnRW2WOkvijpYoIpAADAeKs9cwoAAAD0W+2ZUwAAAKDfhjKcmtlPmNlOM/PgdVJBvZ81s6+Y2SEzO2pmj5rZR81seVG7o8bM3mFmt5nZ983sx2Z2zMyeNrO7zezKgvrbOsa07TWIzzAIseOWbzPux9rlZnazme3Ix2rWzA6Y2V+Z2YJrzDnWMrHjlm8z7sfaajPbYmYPmtlMcNzcWFJ/qsuxNrW0vR+c2HHLt3mnmX3dzJ43sxfN7Dtm9mEzG8rs0Ctmtrnbzy8z2zzoPg6SmU2a2fX5z6aj+c+q281sbe1G3H3oXsque/WO10kdddYruy62s55L+qqkZYP+HEswTh8r+fyt17/qqL+tW/1Bf56Ex41jLbtZsmy8XpJ0CcdaT8aNY016b8nnv7Gk/lSXMZ4a9OdJeNz+jaSXS7b580F/ngGP5eaK3xGbB93HAY7NhKSvl4zLUUnr6rQzdH/9mNmbJX1c2Ycsq3OypC9o/punbpK0SdmzVqXsB/yH+9jNVOxX9vivX1X2lIVf1/zzaiXpd0q2+66yG9c6X+Oi9rhxrLV5QtL1yj7zhyQdyJefJOkzJdt8V+N9rEk1x41jbc4RSV+T9ClJX4nY7h4tPM6u6Xnv0lV73Mzs9ZL+VJJJOi7pOknvV/azUZJ+w8w29K+rQ+WXtfC4+ruB9miwPqL5pzrtVvYz6qb8/aSkW82s81tCFxp0yo5M5CbpfmUJ/KMqmTmV9CvB8nuD5W8Plu8a9OcZ0Bi+NxiD3R3rtuXLtw26n6m9ysaNY23u8/6CpImOZVcHY/BixzqOtchx41grHL/PBJ/9xpI6U/n6Wwfd31ReVeMm6feD9bcEy98fLL9z0J9jgOO3ORiH1YPuT0ovSY8GY/P2YHl4lmhTVTvDNnP6W8r+Ktmp7HFUZX4uKD8QlHdIOpaX32pmr+lt99JlZsvMbI2kDwaLv1FS/WfN7Ln8uqTHzeyPLHtG7dipMW4ca5Lc/T7PHjcXejwoHynZdKyPtchx41hbnI35tZMvmdnu/Jq4sbhOt4GyY+2BkjrjbHt+rfiz+XWVFw26Q4NiZqdKOi9/e0zSw8Hq8NipPDs2NOHUsq87/a+STkj6jYIf6KHVQfmZViHfZrqk3sgys6eVnZp5QtkM4HFJf6Fs9rnISkmnKTt9eLak35P0LTNb2ffOJqTmuK0OymN/rHXYFJTvKanDsbZQ2bitDsoca/FeI+lVyi6XOF/SH0q628yWDbRXaVodlJ8JygeD8qv5Q0iStErST0j6SWVnPf7BzH5xsF0amNVB+ZC7nwjeh8fOmqqGhiacSvqssh8sW9z9kYq6K4LybMe62ZJ64+RE/rKO5U9L+h/KTt1cqew6kdZ4XSDpPy1N95JVNG4cawXM7CplN5ZJWXD6eEcVjrUCFePGsdbMk5I+rez60quUXUvp+bp3Kru2HO3KjrXO425cj7VZSXcq++r19cpuHmud8ViuLK+Mo579jJroSXcimdkVKj+l3Ok8SW+QtFHSDyR9ssY24amwzgtvl5fUS1rsmLn7nuD9RmUHwxslXats1mCzpFcquCHA3d/f0c5XzexlSZ/I379b8xc2D4UlGDeOtfYxk5ltUvatb8slvSDpPe7+w7AOx1qjceNY6xizOtz95zsW3WNmp0j61/n7d0v6y9h2B2kJxq3sWOu8DGJojrUyTcbS3b+o7N9q2M79yjKKJJ1tZv/U3R9f0MJo69nPqIGEU2WPp5iJqHtmXn6TpBfNOif8JEkvmdlX3P29yi6Abzm9VTCzCWWnEFvCeqmLHbM57v5QXvyGmd2n7DS1JL3PzE5y99InH0h6KCi/tub+U9LvcZsKNhn7Y83MPijp85KWSTos6Sp3/3bNtsb2WKs5blNBeeyPtUV6SPPhdKyOtZqmlJ3BkIJjTdIZQfmwu/9jg7ZT05OxdPcnzOw5Zaf3pey4GrdwOhWUTzOzieASzPDYebKqoYGEU3e/X9l1P7WY2aWRu/iW5h/3E257seY/8+5h+ocVO2ZS9ugZd3+ps6mwirJLJY6a2ZnZbvxAR/1/EZSf0ZDp97iJY22Omf225h8/c1DSenffWVCPYy1Qd9zEsRbNzM6W9Jy7H+5YNZbHWoRvSfqlvHyppP+Vly/pqDP0Gv6OuMjdd3Qse5Pmg6k0hMfVYrn7tJk9puyM94Syn02tP7LDY2d7VVuDmjmN9ZCy06qd/ntQ/j1J/zcv3ynpR8pmXNeb2R9KekTSHwT1b+lDP1Oz38z+Utn4HZD0U5J+N1j/lKRn8/I5ym4O+JKyh3k/r+yOut8P6sc8U3CYxYwbx5okM7tW0pb87Yyym8ZeaWZzd/S6e+uXGcdaLnLcONYkmdlrJV2ev31zsOotZta63Oab7v6ssjvKt5jZ/1Z26nZG0gZJHwi2G5djLWbc/kLZJTYrlD3TdI+yY++Pg+1G/ljr4jYze0rSlyTtVfY74vpg/ffc/QeFW46+W5Q9J1ySPmdmn5C0Vtm1uZK0T9Jdla0M+plYi3yeVuFzTvN1fJNK92+wmJW0Mah7RUX97Z1jPKqvmHHjWJsbg20V4+Yca4sbN4612sePS7oir7u5ot5fS7JBf6bUxi2vzzdElY/lVJcx/H8Knu85bi+N6zdE1eXuf6/sdMSdkv5R2Q/0x5T9dfNL3v6Ig1H1KUnfVDb7d0zZ1yE+ruzatovc/Y6g7g5lz5G9R9k/vKPKLlp+RNJ/VnZAdbs2dZTEjBvHWjyOtYY41qLdpeys233KznjMSvqxpH9Q9m1c/9Lz36ho5+5fkPQuSVuVBa6XJP0fSf9e2V3q4+zfSvqfyh44f1jZ74kfKvsd8TPu/uDgujZYnl1jukHZ18zvUfYzalrSHZIudfetddox/l0CAAAgFSM7cwoAAIDhQzgFAABAMginAAAASAbhFAA
AAMkgnAIAACAZhFMAAAAkg3AKAACAZBBOAQAAkAzCKQAAAJJBOAUAAEAy/j8J7zYc4tfZMwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_spectrum('sem', [-40, 0, 0, 0.3], False)" + ] + }, + { + "cell_type": "code", + "execution_count": 170, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnwAAADFCAYAAADOty/CAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAxOAAAMTgF/d4wjAAAHyUlEQVR4nO3cP28cZQLH8Wejc5BPpyBCisPF3SGhIFHYCCigIg0FxZ3QvQnKvBaXvIkTIgWtqUIBKHaBRIQEvsJQmEgXweUuRlkKNNF4MjM7u157Z3/+fCTLO/M8M/Ns/khfzyQ7mU6nBQCAXFdWvQAAAM6X4AMACCf4AADCCT4AgHCCDwAgnOADAAgn+AAAwgk+AIBwgg8AIJzgAwAIJ/gAAMIJPgCAcIIPACCc4AMACCf4AADCjTL49o7LnVWvAQAgxSiDDwCA5RF8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhBB8AQDjBBwAQTvABAIQTfAAA4QQfAEA4wQcAEE7wAQCEE3wAAOEEHwBAOMEHABBO8AEAhBN8AADhRht8e8flzqrXAACQYLTBBwDAcowy+F745fC9Va8BACDFKIMPAIDlEXwAAOEEHwBAOMEHABBO8AEAhBt18PksPgCAsxt18AEAcHaCDwAg3OiDz2NdAICzGX3wAQBwNoIPFvHd3herXgIADLW+wXd392jVSwAAWAd/WPUChtg7Lndu3Sh/L3d3j8o7t7eeDjSj753bW6f2NbebcwEALoHJdDpd9RqesX94+L/mvp2jjx9c2ALaQlEgUvfd3hfl5VtvrXoZwOJ2P/nso9v/ePfDVa8DLsLaPNLd3/rgevV17hfruit4d/fo1NfQ4wBYut1PPvto1v7mnEWOmefaMFZrE3xN9fBbWQS2BWDzNQCDtQVa9dU3vy/AmnPq55sn3JrrqK+tLRhFIWOyNo9057Fz9PGD/a0PrtcfAze36/vaxqrxoefvusZM1eNjj4zXi0e68IzmI9IqePoemzbnVOeof6/m1ud0na95zKz9s8bm1XaurvfffK/1fctYC9RFBt9YVGFYf90WoQuHIqsj+AixzMBoi7RSno2grqDrmn/eVnm9vl+vZjzPsw1Ngm+Euu5MDrmrODgeq2CsHjuf153G8w6j5vmrz8drXrOa1/f5eW3j9X3N67Rtt73fvrlta+06Dpag6w5aMxba7j5V+6v5fdFWd9FBte7mvcPZFpDNuW2/l1wuowu+veNy54VfDt9b9TrSzHUn8cWbN05t/3T/uHX85vtXy/1PH5eNzYPy4/5W+fPOUTl5tF02Ng/Ky7feKnd3j8qLN2+Ujc2DzmstEmH8ruvXporErsjtC9euffOMzzvvos4zUoveqel7HNh112zWHNbTor+fzbDs+wGA9Te64Fvnu3uTjc2r05NHjxc5ZrKxebWUUvqOb85tM2t8iO2fv/z14E9vdn5G49Dx7Z+//LWUUqrX1THV/vrYoIVtbB6Uk0fbvdullKfRydnNE9z10FzWtevna94VbQvatjBsjg+5Y3vOuu7eDLnbdlFr5PLpe9Tc9ufTHcP1Eh98syKsHlHNWKqOGxJQbaG1jPji/LUFbFu0Vvv7znXv2tv/vfLk5Nr2/7/+6uC51954JnSrSK3Ham3fvc2dV15/+PkfT4VwT7ze29x55fVH+9+eOtcZPD3fGvj03+Wv7/+lHHbt7xpvzi2llGpe/ZjmWPOYNtV15383v7v/n8m/bj4//Wf1fej8WcfX95dSSn3OrHNUx1S6xtrOP+Q9L3oc4yYEx2dtgk84MQbffP/D5NW/vXTuf2mGXqc+r+2Yat833/8wKaWU+txqTtd16sfWv7eNtR3bdb0nZfL4SplercbrY031uW3bXfPr856UydMf+KqxruOZz7WTBw+r1w83rl+rtrtet83tM3TeqnUFejOI2/ZfdvP8B54h/w6xb/yy35EcXfABALBca/vBywAADCP4AADCCT4AgHCCDwAgnOADAAgn+AAAwgk+AIBwvwHTpRzJyBxu2wAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_spectrum('ebo', [])" + ] + }, + { + "cell_type": "code", + "execution_count": 160, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnwAAADFCAYAAADOty/CAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAxOAAAMTgF/d4wjAAAFBUlEQVR4nO3csYpcZQCG4dllVxkiCWoabaIQki4LMZWNaSwNNl5DSq8lZW5DS5u1SpMiSZcqCYI2AUGQQBYyVmsTnD0HPHv2vPM81YH5Gb7pXv5hZm+z2awAAOjan3sAAADTEnwAAHGCDwAgTvABAMQJPgCAOMEHABAn+AAA4gQfAECc4AMAiBN8AABxgg8AIE7wAQDECT4AgDjBBwAQJ/gAAOIO5h4AALA0D3769eG213+8983989oyhBs+AIA4wQcAECf4AADiBB8AQJzgAwCIE3wAAHGCDwAgTvABAMQJPgCAOMEHABAn+AAA4gQfAECc4AMAiBN8AABxgg8AIE7wAQDECT4AgDjBBwAQJ/gAAOIEHwBAnOADAIgTfAAAcYIPACBO8AEAxAk+AIA4wQcAECf4AADiBB8AQJzgAwCIE3wAAHGCDwAgTvABAMQJPgCAOMEHABAn+AAA4gQfAECc4AMAiBN8AABxgg8AIE7wAQDECT4AgDjBBwAQJ/gAAOIEHwBAnOADAIgTfAAAcYIPACBO8AEAxAk+AIA4wQcAECf4AADiBB8AQJzgAwCIE3wAAHGCDwAgTvABAMQJPgCAOMEHABAn+AAA4gQfAECc4AMAiBN8AABxgg8AIE7wAQDECT4AgDjBBwAQJ/gAAOIEHwBAnOADAIgTfAAAcYIPACBO8AEAxAk+AIA4wQcAECf4AADiBB8AQJzgAwCIE3wAAHGCDwAgTvABAMQJPgCAOMEHABAn+AAA4gQfAECc4AMAiBN8AABxgg8AIE7wAQDECT4AgDjBBwAQJ/gAAOIEHwBAnOADAIgTfAAAcYIPACBO8AEAxAk+AIA4wQcAECf4AADiBB8AQJzgAwCIE3wAAHGCDwAgTvABAMQJPgCAOMEHABAn+AAA4gQfAECc4AMAiBN8AABxgg8AIE7wAQDECT4AgDjBBwAQJ/gAAOIEHwBAnOADAIgTfAAAcYIPACBO8AEAxAk+AIA4wQcAECf4AADiBB8AQJzgAwCIE3wAAHGCDwAgTvABAMQJPgCAOMEHABAn+AAA4gQfAECc4AMAiBN8AABxgg8AIE7wAQDECT4AgLiDuQcAACzCi+PHp483rqyunXH6/sRrRnHDBwAQJ/gAAOIEHwBAnOADAIgTfAAAcYIPACBO8AEAxAk+AIA4wQcAECf4AADiBB8AQJzgAwCIE3wAAHGCDwAgTvABAMQJPgCAuIO5BwAALMGT9dH10+e/Dn9bz7llLDd8AABxgg8AIE7wAQDECT4AgDjBBwAQt7O/0j1+vfp56Nm7V1ffTbkFAGBKbvgAAOIEHwBA3M5+pQsAsHpx/Hjw2fXRhEOm5YYPACDODR8AsAxDb+O+vHtn4iWLI/gAgJYxX9PuCMEHAMxnQXG2/+7k8r/Pq83enFvGEnwD+M8+AHbegsJsqCfro+tjzu+/O5lqyuQEH0zt0YPf554A5+rTG1fnnrB4h+tng8+evLl1IXZMacRnfPbRV4Pb5qyAe/7yj0Xd4m0j+P7Dx3+/+nbo2T8vXfvl9HnMbeBqFb0RFDjn6unn338y94a6vcP1B0PPbk7evJ3ifdk5twef/HDCFWN2TGnCz1iKum32NpvN3Bv+N2fF1piIgzqxAVwk28Lr5hefbY2VixhtP3x9dKE2pYIPAID3+eNlAIA4wQcAECf4AADiBB8AQJzgAwCIE3wAAHGCDwAg7h9msFnP0kC8oQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_spectrum('msp', [])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "id_score_list = []\n", + "method = 'sem'\n", + "dirname = f'./mnist_lenet_test_ood_fsood_gmm_[-1e-07, 0, 1]_test_sem_0331/scores/'\n", + "for id_dataset in id_datasets:\n", + " feature_dict = np.load(f'{dirname}/{id_dataset}.npz')\n", + " id_score_list.extend(feature_dict['conf'])\n", + "id_score_list = np.array(id_score_list)\n", + "\n", + "\n", + "csid_score_list = []\n", + "for csid_dataset in csid_datasets:\n", + " feature_dict = np.load(f'{dirname}/{csid_dataset}.npz')\n", + " csid_score_list.extend(feature_dict['conf'])\n", + "csid_score_list = np.array(csid_score_list)\n", + "\n", + "nearood_score_list = []\n", + "for nearood_dataset in nearood_datasets:\n", + " feature_dict = np.load(f'{dirname}/{nearood_dataset}.npz')\n", + " nearood_score_list.extend(feature_dict['conf'])\n", + "nearood_score_list = np.array(nearood_score_list)\n", + "\n", + "farood_score_list = []\n", + "for farood_dataset in farood_datasets:\n", + " feature_dict = np.load(f'{dirname}/{farood_dataset}.npz')\n", + " farood_score_list.extend(feature_dict['conf'])\n", + "farood_score_list = np.array(farood_score_list)" + ] + }, + { + "cell_type": "code", + "execution_count": 129, + "metadata": {}, + "outputs": [], + "source": [ + "def plot_ours_spectrum(method, size):\n", + " id_score_list = []\n", + " for id_dataset in id_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_{method}/{id_dataset}.npz')\n", + " id_score_list.extend(feature_dict['conf'])\n", + " id_score_list = np.array(id_score_list)\n", + " id_score_list = np.log2(id_score_list + 1e-100)\n", + " \n", + " csid_score_list = []\n", + " for csid_dataset in csid_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_{method}/{csid_dataset}.npz')\n", + " csid_score_list.extend(feature_dict['conf'])\n", + " csid_score_list = np.array(csid_score_list)\n", + " csid_score_list = np.log2(csid_score_list + 1e-100)\n", + " \n", + " nearood_score_list = []\n", + " for nearood_dataset in nearood_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_{method}/{nearood_dataset}.npz')\n", + " nearood_score_list.extend(feature_dict['conf'])\n", + " nearood_score_list = np.array(nearood_score_list)\n", + " nearood_score_list = np.log2(nearood_score_list + 1e-100)\n", + " \n", + " farood_score_list = []\n", + " for farood_dataset in farood_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_{method}/{farood_dataset}.npz')\n", + " farood_score_list.extend(feature_dict['conf'])\n", + " farood_score_list = np.array(farood_score_list)\n", + " farood_score_list = np.log2(farood_score_list + 1e-100)\n", + " \n", + " font = {'family': \"Times New Roman\", 'weight' : 'bold', 'size' : 15}\n", + " plt.rc('font', **font)\n", + " plt.figure(figsize=(8, 3), dpi=200)\n", + "\n", + " n_bins = 200\n", + "\n", + " figure(figsize=(10, 3), dpi=80)\n", + " plt.hist(farood_score_list, n_bins, density=True, \n", + " weights=np.ones(len(farood_score_list)) / len(farood_score_list), \n", + " facecolor='#FFC690', alpha=0.9)\n", + "\n", + " plt.hist(nearood_score_list, n_bins, density=True, \n", + " weights=np.ones(len(nearood_score_list)) / len(nearood_score_list), \n", + " facecolor='#FFDEBF', alpha=0.8)\n", + "\n", + " plt.hist(csid_score_list, 
n_bins, density=True, \n", + " weights=np.ones(len(csid_score_list)) / len(csid_score_list), \n", + " facecolor='#BFEBFF', alpha=0.7)\n", + "\n", + " plt.hist(id_score_list, n_bins, density=True, \n", + " weights=np.ones(len(id_score_list)) / len(id_score_list), \n", + " facecolor='#90B1C0', alpha=0.6)\n", + " \n", + "# plt.xlim(size[0], size[1])\n", + "# plt.ylim(size[2], size[3])\n", + "# plt.axis('off')\n", + "\n", + " plt.savefig(f'./{method}.pdf')" + ] + }, + { + "cell_type": "code", + "execution_count": 130, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqEAAADbCAYAAABdhEjQAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAxOAAAMTgF/d4wjAAAWi0lEQVR4nO3df/BldX3f8edL6IKwXX4U4hjWKCAEiQhx1sRYw7jE8kOU1pnYaMkPhjgGU+NEoK02pJb4C8ZI2rFmSCMVIwZ0NLMjKiqgKWhqWzTSnSmwRnazWYSQqPzYVUKXffePe+7u5e79fr/nfr/3e77fe/f5mDlzfnw+53PO/cyB72s/99xzUlVIkiRJXXrGSp+AJEmSDjyGUEmSJHXOECpJkqTOGUIlSZLUOUOoJEmSOmcIlSRJUucOXukTWIxDDjmkjj322JU+DUmSJM3hgQceeLKqDpmrfCpD6LHHHsuOHTtW+jQkSZI0hyR/N1+5X8dLkiSpc4ZQSZIkdc4QKkmSpM4ZQiVJktQ5Q6gkSZI6N1YITXJxktuS3JDk+iTr5qm7IckdSXYluS/JhUtpT5IkSbOj9SOamhB5HXBBVd2cZDuwCThrRN0TgFuBw4A1wMnAx5J8t6q+Mm57kiRJmi3jjIRe2cy3NfOtwMYkZ46o+0vARuBQ4NeabWm2L6a9lfXAXfsmSZIkLVmrENqMbJ7YrD7RzKuZnzNilz+oqm9Vz58A/6fZ/tAi25MkSdIMaTsSesrA8u6hspOGK1fVE0Ob1gBPAX+6mPYkSZI0W9qG0KMGlmuo7Mj5dkzyXHr3hF5dVVsW016SS5Ps6E87d+5sddKSJElandqG0F3zlD22wL7vAD4L/O5i26uqa6pqfX9au3btAoeUJEnSatb21/H3DiwPB9d75topyauAE+j9An5PkkOBPYttT5IkSbOh7UjofcD9zfLwMOStSc5PckuS8/obk5wIvB3498Dzk7wY+CPgoIXaG+P8JUmSNIVahdCqKuCKZrX/w6HjgDur6g7gKuBc4GqA5qHznwF+HvjfwGbgG8CLq+pHLdqTJEnSDGv9sPqqujHJEcBlSX4F+Brwlqb4E8DxwE3N+ieBU0c0882W7UmSJGmGpTcoOV3Wr19fO3bs6O6Agw+pP25Dd8eVJEmaUkkeqKr1c5WP9e54SZIkaRIMoZIkSeqcIVSSJEmdM4RKkiSpc4ZQSZIkdc4QKkmSpM4ZQiVJktQ5Q6gkSZI6ZwiVJElS5wyhkiRJ6pwhVJIkSZ0zhEqSJKlzhlBJkiR1zhAqSZKkzhlCJUmS1DlDqCRJkjpnCJUkSVLnDKGSJEnqnCFUkiRJnTOESpIkzZDb796y0qfQiiFUkiRpRkxLAAVDqCRJklaAIVSSJEmdM4RKkiSpc4ZQSZIkdc4QKkmSpM4ZQiVJktQ5Q6gkSZI6ZwiVJElS5wyhkiRJ6pwhVJIkSZ0bK4QmuTjJbUluSHJ9knUL1D8iyWVJ/jrJwSPKP5WkhqbLx/0QkiRJmi77BcO5JLkQuA64oKpuTrId2AScNaLuMcAlwNuAo+do72jgNUObnwI+3vacJEmSNJ1ah1Dgyma+rZlvBTYmObOq7hiq+wjwHuBHwO/P0d4bgN8GvjKw7cmqenCMc5IkSRJw+91bVvoUxtIqhCY5ATixWX2imVczPwd4Wgitqt3Nft+bp9mLgBcBfwt8Hbi2qr7c6qwlSZI01dreE3rKwPLuobKTxj1oklOBDcAa4DnA64Dbk7x/jvqXJtnRn3bu3DnuISVJkg4Y0zAq2jaEHjWwXENlRy7iuLuBNwHvAr4A7Gm2X57k7OHKVXVNVa3vT2vXrl3EISVJkrRatL0ndNc8ZY+Ne9Cq2gLsjejN1/0fA14GvBr40rhtSpIkaXq0HQm9d5597lnqSVTV/fTC5+PAk0ttT5IkSatb2xB6H3B/szz8XfitSc5PckuS89oeOMnJSQ7qr1fVD+iF3c+2bUOSJEnTqVUIraoCrmhW+z9EOg64s3k801XAucDVQ7seOrC8pr+Q5PX0gu3mJK9otp0OfL2q/nysTyBJkqSp0/qNSVV1I/Bm4LIkm4CvAa9qij9B777Rm2Dvm5JeD/zGQBPvS7KhWf4yvQfdPxv4XJI/A06vqrcu/qNIkiStHpsf3Tdpf+M8rJ6quha4dsT2dwPvHlh/lF4gvWmOdh4GXjvWmUqSJGlmjPXueEmSJGkSDKGSJEnqnCFUkiRJnTOESpIkqXOGUEmSJHXOECpJkqTOGUIlSZLUOUOoJEnSlLv97i2ttq0mhlBJkiR1zhAqSZKkzhlCJUmS1DlDqCRJkjpnCJUkSVLnDKGSJEnqnCFUkiRJnTOESpIkqXOGUEmSJHXOECpJkqTOGUIlSZLUOUOoJEmSOmcIlSRJUucMoZIkSeqcIVSSJEmdM4RKkiSpc4ZQSZIkdc4QKkmSNMVuv3vLSp/CohhCJUmS1DlDqCRJkjpnCJUkSVLnDl7pE5gGmw9+/t7l01bwPCRJkmbFWCOhSS5OcluSG5Jcn2TdAvWPSHJZkr9Osl/gHbc9SZIkzYbWI6FJLgSuAy6oqpuTbAc2AWeNqHsMcAnwNuDopbYnSZKk2TLOSOiVzXxbM98KbExy5oi6jwDvAd47ofYkSZKm1uZH903qaRVCk5wAnNisPtHMq5mfM1y/qnZXVQHfm0R7kiRJmi1tR0JPGVjePVR20iKOO1Z7SS5NsqM/7dy5cxGHlCRJ0mrRNoQeNbBcQ2VHLuK4Y7VXVddU1fr+tHbt2kUcUpIkSatF2xC6a56yxxZx3Em3J0mSpCnSNoTeO88+9yziuJNuT5IkSVOkbQi9D7i/WR7+LvzWJOcnuSXJeZNor2Ub3XnqyX2TJEmSlqxVCG1+6X5Fs9r/4dBxwJ1VdQdwFXAucPXQrocOLK8Zoz1JkiQt4Pa7t6z0KSxa6+eEVtWNwJuBy5JsAr4GvKop/gS9+zxvgr1vSno98BsDTbwvyYaW7UmSJGmGpTcoOV3Wr19fO3bs6Ox4m7/78N7l0378xzo7riRJml5zPZj+tCMmd4yFRkJ/4fSTJ3ewMSV5oKrWz1U+1rvjJUmSpEkwhEqSJKlzhlBJkiR1zhAqSZKkzhlCJUmS1DlDqCRJkjpnCJUkSVLnDKGSJEnqnCFUkiRJnTOESpIkqXOGUEmSpCm00Cs7VztDqCRJkjpnCJUkSVLnDKGSJEnqnCFUkiRJnTOESpIkqXOGUEmSJHXOECpJkjSjVvNjnAyhkiRJ6pwhVJIkSZ0zhEqSJKlzhlBJkiR1zhAqSZKkzhlCJUmS1DlDqCRJkjp38EqfgCRJ0sx7dPu+5SN+YuXOYxVxJFSSJGnKrOaH0LdlCJUkSVLnDKGSJEnqnCFUkiRJnRsrhCa5OMltSW5Icn2SdfPUXZfkI0luSvLFJL8+os6nktTQdPliPogkSdKBYBbuB4Uxf
h2f5ELgOuCCqro5yXZgE3DWHLt8GjgDeBawEbgtye6q+mjT3tHAa4b2eQr4+DgfQJIkSdNnnJHQK5v5tma+FdiY5MzhikleDrwS2F5Ve4D7m6J3DlR7A/DbwAsGppOr6sExzkmSJElTqFUITXICcGKz+kQzr2Z+zohdzp6j7vFJTmqWLwL+E/Al4PeAH6+qfliVJEnSDGs7EnrKwPLuobKT2F+//nBdgJOSnApsANYAzwFeB9ye5P0tz0eSJElTrG0IPWpguYbKjpyn/nDdfv3dwJuAdwFfAPY0ZZcnOXt4hySXJtnRn3bu3NnytCVJkrQatf1h0q55yh4bt35VbQH2/rSr+br/Y8DLgFfT+4p+r6q6Brimv75+/fpR4VaSJElTou1I6L3z7HPPPPVHtb9f/eZe0FcDjwNPtjwnSZKkA8qsPJ4J2ofQ+9j3C/e1Q2W3Jjk/yS1Jzmu2fX6Oulur6jsASU5OclC/oKp+QC+8frb12UuSJGkqtQqhVVXAFc1q/4dIxwF3VtUdwFXAucDVTf076H2lfnwTNI9r9nkPQJLX0wu2m5O8otl2OvD1qvrzJX0iSZIkrXqtH1ZfVTcmOQK4LMmvAF8D3tIUfwI4HrhpYJdfAj4E3AwcBlzUf1A98GV6D7p/BfC5JF8ENlXVWxf/USRJkjQtWodQgKq6Frh2xPZ3A+8e2vYIcOEc7TwMvHacY0uSJE2VR7ev9BmsamO9O16SJEmaBEOoJEmSOmcIlSRJUucMoZIkSeqcIVSSJGkKzNKD6sEQKkmSpBVgCJUkSVLnDKGSJEmr3Kx9FQ+GUEmSJK0AQ6gkSZI6ZwiVJEmaYav1q3xDqCRJkjpnCJUkSVLnDl7pE5AkSZolmx9doMJdH963vOGNy3ouq5kjoZIkSeqcIVSSJGkVW60/LFoqv46XJEmapEe3T6ypWQ2gYAiVJElasgXvA9V+DKGSJElLNcbo5+YfO3vv8mlz1Ln97i38wuknL/GkVjfvCZUkSVLnHAmVJElahM3bJ3fv54HIkVBJkiR1zpFQSZKklhz9nBxDqCRJ0iiDbzbqG/hRkZbGECpJkrSAzSsQPmf5GaFgCJUkSdpnYPRzIsHz4EPnLe4/X/S0I/Ztm/Xw2WcIlSRJB55RX7WzhOC5QNicU//5okf8BHDgBFAwhEqSpFkxGCw3vHH09qVabNhs4UAKoGAIlSRJ02yugDm4/Yj1vfmjO/bfNmwZQ+a8x3vgLmBdt8deYYZQSZK0uowzojlXmBxlqO7mw1885oktn80HP5/v/+hvADh6z2Nw+LErfEbLzxAqSZKW37e/sG/5sGP2Lf/w7/evOxgWB/cbJ3DOYTUFz3nt+rvefEJhdDW+i36sEJrkYuBfAQ8Bu4G3VtVjc9RdB/xn4JnAUcAnq+q6xbYnSZIm6IG7evPjNuy/bdioOsdt2DcyOW44HBU8l8HUBE6Ap56EPXv2394PozBzo6OtQ2iSC4HrgAuq6uYk24FNwFlz7PJp4AzgWcBG4LYku6vqo4tsT5Kk6TQY7gYD3UJ12+i3N7zfXEGvPwrZLx8caZzLqDrf/sJERibH1Q+Wp+365n7bpsW92x7klOc9e+8ysHd9XoOBdNCUhtNxRkKvbObbmvlWYGOSM6vqjsGKSV4OvBL4ZlXtSXJ/U/RO4KPjtidJWibjhKOVPI9xz3PcILdYg0Fvoa+Y+9qEvnGM215Ho5ALWWpwnJbg2Q+cg8Gzv324Xt/3Wbt3+Wh2LnyQKR0tbRVCk5wAnNisPtHMq5mfAwyHxrPnqHt8kpOAp8ZsT5K0nH7495MPR4u10Hms8HkOhp/TGAh0I8Ld0+o2I3ejts21j7o1HBTb1h8e2RxuYzhwLqsJ30u6nNqOhJ4ysLx7qOykeeoP1x1Vf8H2klwKXDqw6akkD41oW+NbC23+maUx2KfLw36dPPt08uzTybNPl0cX/TpvEm4bQo8aWK6hsiPnqT9ct18/47RXVdcA18x3glqcJDuqqvubemaYfbo87NfJs08nzz6dPPt0eayGfm0bQnfNUzbq1+wL1Z/vuP46XpIkacY9o2W9e+fZ55556o9q/55FtCdJkqQZ0jaE3gf0f+G+dqjs1iTnJ7klyXnNts/PUXdrVX1nofZanpMmw9scJs8+XR726+TZp5Nnn06efbo8VrxfUzXqts0RFZM3AH8K/GJVfTrJt4EHq+rMJJuBFwKbq+pFTf0vAj8DHAO8FPgq8Mb+A+vna2+yH1GSJEmrTesQCpDkEuBXgYeBR4C3VNXOJFcAbwfeW1XvbeoeCXyI3o+UDgM+0n9Q/ULtLe0jSZIkabUbK4RKkiRJk9D2nlBJkiRpYgyhB4AkZyS5NcnjSb6T5JdH1Hl78+OyTye5Jsk/Gip/WZLbknw4yeeTnNrdJ1idkpyd5BtJfpjkW0nOHlHnriQ1NP3iQLn9OqBln3qtjinJs5JcmWT/1/PgdboYLfrU63SJkrxwxHX5vSRrBurM28/aX5KLm2vvhiTXJ1m3YidTVU4zPAGnAt8CPkPvvtui99rUFw7U+Z1m+2nAM4En6d3D2y8/md5bFT7QrH+M3n28x6z051vBfv1Z4B+avqpmegL4yYE6PzVQ1p++Dxxivy66T71Wx+vT5wFXAT9s+m3HiDpep5PvU6/TyfT174+4Nj/Utp+dRvbphU2fvaZZ3w58eaXOx5HQ2fci4Ger6gLgJfRek/oMmtejJnkm8O+autuq6kfAg8CvJXles/3fAocD25r1++m9ius3Ozj/1eq19F5P+0zgimbbIc32vouAfwG8YGA6var+oSm3X59u3j71Wl2UB6rq7cB189S5CK/Tcczbp16nk5HkIOCnefp1+QLgHU15m37W/q5s5tua+VZgY5IVeTKRIXTGVdVNA39MvgM8Tu9lAV9stv0c8I+b5Sf6u9F7teo/a9bPHlEOcM5ynPOU+I9VtbWqngLeCzzabH8I9v4P9ELgJuBz9J4ecXhV/c1AG/br083bp3itjq2q/l+z+L1R5V6n41uoT/E6nZRzgY3A/wT+BHgd8FBV9d+q2KafNSDJCcCJzeqquPbavrZTq1CSw4A1C1TbWVW7mz82VwJHApdU1Q+b8lMG6u4e2vek5hjPmat8/LNe/dr269D6GnoBf1Ozfg7w7Gb5hGb65SRvq6oPHmj9OqE+9VodMM5///OUe50OmFCfep0uoGU//zq9QLmO3rd4LwHelOSVVXUfC/TzpM51xqy6PnMkdLr9V+AHC0wvbwLoH9P76iLATUn+ZdPGUf3GqrlBZMCRg+Xs+xfTYPksatWvA/X/Kb2vkC+rqkeabQ8Cb6Y3ovfVZttBwB8keQEHXr9Ook+9Vp9u3D4dxev06SbRp16nC2vTzzcClwP/Bfhus9964CPN8kL9rP2tumvPkdDp9qv07ueaz1PNf6AXJ/kQcDO9kY/LgE8Cu+bZ97EW5bOoVb8OLP8OvZvl/7i/oar+EvjL/nqSn6b3hrBTgPOA/zZP27PYr0vuU7xWh43bp/vxOt3PkvsUr9M2xvnbRZK3AW8EPgj8XJKj
sR8XY9X1mSOhU6yq9lTV7gWmGqj/DeC3mtXDm/m9/fIkw9fDPc0oVP+evP3KJ/dpVo9x+jXJm+n9qvW3mvWRj7po/tj/c3r/+nzyQOvXCfWp1+qAcf/7b9mm1+nS+9TrdAGL+Nu1u6quBa5uNj3JAv28/J9iKt07sLwq+swQOuOS/JMkGdj0pWZ+ezO/k3334q0dqFcDdW4ZUQ5w66TOcxoleSm9XxV/APipJC+h9y91khyU5Gn32FTVFuBvgc83m+zXIfP1KV6rE+d1uiy8TicgyTFJjh3a/BfAf6/e673b9LOe7j56T2KAVXLtGUJnWJKfpPfH5CtJ+jfCvxT4K+BdAFW1i979YNC7aX4NvUeFfLyqtjXb30dvGL//x+o4er8M/aPl/gyrVZL1wJ/R+5Xr3cBm4H8BRzdV/g2wJclfJDmj2ecc4INV1f+fgP06YKE+9VpdkkOb+fCPQbxOF29kn3qdTsxXge8m+UCSdc1vG84D/jW07mcNaEaX+4+/G7z27qyqO1binHx3/AxrnqP2YXqPujiY3sjGVuD9VfWDgXoBfpfeYy0eB/4v8I6BR5GQ5OfpPaB5B72bm3+zqv6qo4+yqiQ5FPgfwBkjin+vqt6Z5GTgD4GfAfYAnwU+WVWfGWrLfqVdnzb1vFbHkOTZwCvp/aPzuc3m/wB8qqru8Tod30J92tTxOl2iJBfReyboc+k9UH0T8IeDAbNNP2t/SS6hd1/uw/ReYvOWZnS5+3MxhEqSJKlrfh0vSZKkzhlCJUmS1DlDqCRJkjpnCJUkSVLnDKGSJEnqnCFUkiRJnTOESpIkqXOGUEmSJHXOECpJkqTOGUIlSZLUuf8PQ4AC19UaAMwAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_ours_spectrum('llfs', [-80, 0, 0, 0.20])" + ] + }, + { + "cell_type": "code", + "execution_count": 131, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqkAAADbCAYAAABOUwgkAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAxOAAAMTgF/d4wjAAAbTUlEQVR4nO3df7RlZX3f8fcXEGQcZwbKDAu4VoGOGSJEko7GH5QVFAdRtM1qDBr8wUJMlZqkAbW6pFVCNNIqahVrC0QxGECliyUGCgMxGcQ2dTRJh5SBBmaCdxyKCgwOMuKFb//Y+zB7Dvucu8+de+7Z59z3a6299j77efY+z3nWhvuZZ/+KzESSJElqk31G3QBJkiSpmyFVkiRJrWNIlSRJUusYUiVJktQ6hlRJkiS1jiFVkiRJrbPfqBswLAcccECuXLly1M2QJElSD9u2bXs8Mw+oK5vYkLpy5Uqmp6dH3QxJkiT1EBE/7FXm6X5JkiS1jiFVkiRJrWNIlSRJUusYUiVJktQ6hlRJkiS1jiFVkiRJrWNIlSRJUutM7HNSJUnSaGzasXv5uOWja4fGmyOpkiRJah1DqiRJklrHkCpJkqTWGSikRsRZEXFLRFwZEV+MiGWz1F8eEedFxD9ExNOuf51tfxFxTETcFBGXRsTNEfGyQdorSZKk8dQ4pEbEGcDlwKcz883AK4DretQ9JCLOB+4FPg7840H3FxEHA98EdmbmO4C/AtZHxDFN2yxJkqTxNMhI6gXlfGs53wKcFBEn1tR9GPgI8NG92N85wKGV8nuBJcB7B2izJEmSxlCjkBoRRwFHlx93lfMs56d018/MmcxM4Md7sb91Tb9PkiRJk6XpSOqayvJMV9nqOXxvk/2t6VF+eEQs6d5hRJwbEdOdaefOnXNoliRJktqgaUg9qLKcXWUr5vC9TfZ3UI/y2u/MzIszc6ozLV26dA7NkiRJUhs0DamP9il7ZA7f22R/8/2dkiRJGhNNQ+rmPtvcOYfvbbK/zT3KpzPTc/mSJEkTrGlIvYvi7nqA7vPo6yPitRFxY0ScOh/7K+c3zFIuSZKkCdUopJZ36p9ffuzc2HQEcFtmbgA+BrwauKhr02dWlvcfYH8AnwW2d5XvAi5u0mZJkiSNr8bPSc3Mq4B3AedFxHXA7cBryuJrKK4hvRqeetPUG4F/VdnFH0XE2ob7IzMfBF4FLIuIa8rlEzPzjgF/oyRJksZMFIOak2dqaiqnp6dH3QxJkhadTTt2Lx+3fHTtUPtFxLbMnKorG+SNU5IkSdKCMKRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWMaRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWMaRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWMaRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWMaRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWMaRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWMaRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWGSikRsRZEXFLRFwZEV+MiGV96i6LiC9ExNURcVNEvL2rfENEZM30rUqd02rK7xj8Z0qSJGmc7Ne0YkScAVwOvD4zr4+I+4DrgFf02ORa4HjgUOAk4JaImMnMKyJif+BFPbb7SmX5zJryK5q2WZIkSeOpcUgFLijnW8v5FuCkiDgxMzdUK0bECcDJwPcy88mIuLcs+hBFyPynwC3AHwPbgRlgNXAl8LVyHwcDBwDHdLXjHwZosyRJksZQo5AaEUcBR5cfd5XzLOenABu6NlnXo+6REbEaeAj49cycqXzHqcDtmfmDctWbgNcALwf+jmJk9vOZ2dmnJEmSJlTTa1LXVJZnuspW96nfXRdgdWZurgbU0unsear/bWX7DgJOAD4JfCciVtU1MCLOjYjpzrRz584eP0WSJElt1zSkHlRZzq6yFX3qd9etrR8Rx1Kc1u+c6t8P+CzwfuBSipFXgGOBT9U1MDMvzsypzrR06dK6apIkSRoDTa9JfbRP2SPzUP90YENm3g9QjrJ+qVMYEb8HvA/4MHDabI2VJEnSeGs6krq5zzZ39qlft/+6+t2n+veQmY9l5gXAl4HH+7RTkiRJE6BpSL0L6Nyh330efX1EvDYibixvfgK4oUfdLZl5T3VFRPwKcBTFjVHV9YdHxIqu7b8NfKNhmyVJkjSmGoXUzEzg/PJj50apI4DbysdPfQx4NXBRWX8DcDPF3fz7lnUBPlKz+9OBv8zMBzorymtS7wZ+EBEfiIgDIuKZwIsprlOVJEnSBGv8nNTMvCoilgPnRcRbgNuBd5fF1wBHAldXNjkduAS4HlgCnJmZdQ/i/02KkFv9rpmIuBA4h+I61NOBrwPvycwfNW2zJEkarU07di8ft3x07dD4iWKQdPJMTU3l9PT0qJshSdKiUw2mdQyr6oiIbZk5VVfW9JpUSZIkacEYUiVJktQ6hlRJkiS1jiFVkiRJrWNIlSRJUusYUiVJktQ6hlRJkiS1jiFVkiRJrWNIlSRJUusYUiVJktQ6hlRJkjQaGy8rJqmGIVWSJEmtY0iVJElS6xhSJUmS1DqGVEmStKA27SinVetG3RS1mCFVkiRJrWNIlSRJUuvsN+oGSJKkRa76GKq1Z4+uHWoVR1IlSZLUOobU+bJtYzFJkiRpr3m6X5IkLZwd9426BRoTjqRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWMaRKkiSpdQypkiRJah1DqiRJklrHkCpJkqTWGSikRsRZEXFLRFwZEV+MiGV96i6LiC9ExNURcVNEvL2mztciIrum91TKjym3vTQibo6Ilw328yRJkjSOGr9xKiLOAC4HXp+Z10fEfcB1wCt6bHItcDxwKHAScEtEzGTmFeX+DgZe17XNE8CXK+XfBG7PzHdExIXA+ohYm5l3Nm23JEmSxs8gI6kXlPOt5XwLcFJEnNhdMSJOAE4G7svMJ4F7y6IPVaq9Cfg3wDGV6fmZub0sP4ci4Ha+715gCfDeAdosSZKkMdQopEbEUcDR5cdd5TzL+Sk1m6zrUffIiFhdLp8JfAq4GfgD4PDM7ITZfvuo+z5JkiRNkKan+9dUlme6ylbzdJ363XUBVkfEM4C15efnlNMbIuLjmdkZKe21j8MjYklm/rS6MiLOBc7tfF6+fHntD5EkSS228bLdy2vPHl07NHJNT/cfVFnOrrIVfep31+3UnwF+G7gQ+O/Ak2XZeyKiM4I62z72kJkXZ+ZUZ1q6dGnNZpIkSRoHTUdSH+1T9sig9TPzbuDuzorycoI/AV4GnEZxCcCjQK/h0LrvlCRJ0oRoOpK6uc82dXfad+rX7f9p9ctrUU8DfgI8Pss+pjNzZ9/WSpIkaaw1Dal3sfsO/e7z6Osj4rURcWNEnFquu6FH3S2ZeQ9ARDw/IvbtFGTmQxTB9Buz7
[base64 PNG data omitted: score-spectrum histogram rendered by plot_ours_spectrum('hlff', [-150, 0, 0, 0.06])]", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot_ours_spectrum('hlff', [-150, 0, 0, 0.06])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## SEM" + ] + }, + { + "cell_type": "code", + "execution_count": 133, + "metadata": {}, + "outputs": [], + "source": [ + "# llfs\n", + "llfs_id_score_list = []\n", + "for id_dataset in id_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_llfs/{id_dataset}.npz')\n", + " llfs_id_score_list.extend(feature_dict['conf'])\n", + "llfs_id_score_list = np.array(llfs_id_score_list)\n", + "llfs_id_score_list = np.log2(llfs_id_score_list + 1e-100)\n", + "\n", + "llfs_csid_score_list = []\n", + "for csid_dataset in csid_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_llfs/{csid_dataset}.npz')\n", + " llfs_csid_score_list.extend(feature_dict['conf'])\n", + "llfs_csid_score_list = np.array(llfs_csid_score_list)\n", + "llfs_csid_score_list = np.log2(llfs_csid_score_list + 1e-100)\n", + "\n", + "llfs_nearood_score_list = []\n", + "for nearood_dataset in nearood_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_llfs/{nearood_dataset}.npz')\n", + " llfs_nearood_score_list.extend(feature_dict['conf'])\n", + "llfs_nearood_score_list = np.array(llfs_nearood_score_list)\n", + "llfs_nearood_score_list = np.log2(llfs_nearood_score_list + 1e-100)\n", + "\n", + "llfs_farood_score_list = []\n", + "for farood_dataset in farood_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_llfs/{farood_dataset}.npz')\n", + " llfs_farood_score_list.extend(feature_dict['conf'])\n", + "llfs_farood_score_list = np.array(llfs_farood_score_list)\n", + "llfs_farood_score_list = np.log2(llfs_farood_score_list + 1e-100)\n", + "\n", + "\n", + "# hlff\n", + "hlff_id_score_list = []\n", + "for id_dataset in id_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_hlff/{id_dataset}.npz')\n", + " hlff_id_score_list.extend(feature_dict['conf'])\n", + "hlff_id_score_list = np.array(hlff_id_score_list)\n", + "hlff_id_score_list = np.log2(hlff_id_score_list + 1e-100)\n", + "\n", + "hlff_csid_score_list = []\n", + "for csid_dataset in csid_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_hlff/{csid_dataset}.npz')\n", + " hlff_csid_score_list.extend(feature_dict['conf'])\n", + "hlff_csid_score_list = np.array(hlff_csid_score_list)\n", + "hlff_csid_score_list = np.log2(hlff_csid_score_list + 1e-100)\n", + "\n", + "hlff_nearood_score_list = []\n", + "for nearood_dataset in nearood_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_hlff/{nearood_dataset}.npz')\n", + " hlff_nearood_score_list.extend(feature_dict['conf'])\n", + "hlff_nearood_score_list = np.array(hlff_nearood_score_list)\n", + "hlff_nearood_score_list = np.log2(hlff_nearood_score_list + 1e-100)\n", + "\n", + "hlff_farood_score_list = []\n", + "for farood_dataset in farood_datasets:\n", + " feature_dict = np.load(f'./digits_baseline/digits_baseline_hlff/{farood_dataset}.npz')\n", + " hlff_farood_score_list.extend(feature_dict['conf'])\n", + "hlff_farood_score_list = np.array(hlff_farood_score_list)\n", + "hlff_farood_score_list = np.log2(hlff_farood_score_list + 1e-100)" + ] + }, + { + "cell_type": "code", + "execution_count": 171, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnwAAADFCAYAAADOty/CAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAxOAAAMTgF/d4wjAAAJmElEQVR4nO3dzWsc5x3A8UeyJVgwiV8UsAOlbqF2oVQ5uJceTA0hECgpvfYv6DF/i4655U8o8aG+GBRa0MU5RMUQueAXDLJpFbspDQLJlnowT/p4OrM7u6vdnfnp8wGh1eyLxpfV179nZnbp+Pg4AQAQ1/KidwAAgNkSfAAAwQk+AIDgBB8AQHCCDwAgOMEHABCc4AMACE7wAQAEJ/gAAIITfAAAwQk+AIDgBB8AQHCCDwAgOMEHABCc4AMACE7wAUAPbe6l24veB/pD8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AACCE3wAAMEJPgCA4AQfAEBwgg8AIDjBBwAQnOADAAhO8AEABCf4AKCnNvfS7UXvA/0g+AAAghN8AADBCT4AgOAEHwBAcIIPACA4wQcAEJzgAwAITvABAAQn+AAAghN8AADBCT4AgOAEHwBAcIIPACA4wQcAEJzgAwAITvABAAQn+AAAghN8ABDA5l66veh9oLsEHwB00dbGbtra2G3z0Bx7oo8mgg8AIDjBBwA9ZqpHG4IPALqmXMptuawLw8QMvkeb9xa9CwCwCJt76bapH1Uxgw8A+spEjxkQfADQdSKQKQk+AIDgBB8AQHCCDwC6YtjS7ZjLuk7coCT4AACCE3wAAMEJPgDoAmfiMkOCDwAWTewxY4IPABZhksgThkxI8AFAUM7UJRN8ALAoJnbMieADAAhO8AHAIpnyMQeCDwD6xCduMAHBBwAQnOADAAhO8AEABCf4AKBvnOjBmAQfAEBwgg8AgnOmLoIPAOitjS++/GzR+9AHgg8AeujC908+uvD9k48WvR/ztPHFl58JvMkIPgCgc0aFXVP8CcJ6gg8A6IyTDjYB+IbgA4BToCsnbowbYHWTvDavIfTeJvgAYN5cR28uRN//CL7s0ea9Re8CAB3W0b8T45y40ZUpX51p40zcDXd20TsAAOGZ6E1kkuXfpu2f/u43fzyZveonEz4AYCEmOTaPyQg+AJilOUz3Ttv1+KrahONpv4SL4AOAWZlB7H39/u8vTvP8Lh/Hx+w4hg8AZmGK2Cuj7oPdP73I28rb0+za5l66fWstfTLNa4wjT9LycXTlZM00bj5M+ADgJG1t7J7kZK+Mu2Ghd9qXdRlO8KXU2VPtAeiZGR2vN+1Er84slnbLiyRXv3fBaf4sXsEHACdhyiXcHHWTxN3SymB10t89T12MrS7u0yw4hg8AOmKaSV4ZfedfvbyZUkr/OnvhL+Xt6nPmfSxfV5yWyCsJPgCY1gmdoNHG0spg9fhw/6DuvosH//jt0fLKvyfdl7byhYyjhFPdhZmjXazZki4ATGOS2Lt0bW2SXzXu0m2e7lVvl9oeyzfOMXldPH7vtDPhKz3avJd+cutXi94NADoq/53IJ/s9//r9xsdeuraWvn2wN+zltn/8h8tLKaXqxC6H3fHh/kFT5DVtXz46fKdpyldGX90Sb51hH1fW5vl9EW2iV2XC5wxdgP6rvpe3eW8vHzPO87c2dtPh/no63F9Pl66t/fBVp3pf8XO5lLu0Mlgtv8rtdS97EidplPE37pTvtIh0Vu/pnvCJPYDYyvf5vILTFHp1fxPKbYf762llsD007r59sFd7f822WZ5ZO2zKVzr/6uXNPOn7/K/3n1z9+S+2U0rp1lr6JErojGPUsYl9ngKe7uADoB+q4ZZ/Lm+PmtKN+5/8uscf7q8PfU6LY/O2z92Yy9/eavTtPHw6uP7TH+1XH1dGX0opPf7m/vrnKT1JKd1JKaV3L659/N2LvTvz2OcuiBq6lnQBWKymyVr51fT4Wa7U5GXb/BXAzsOng52HTwf5dv5+/tXLm4Pl15cff3P/h3/nuxfXPl7UfnZNhI96M+EDYHp1J72VJzhUl1PLyVy5vQtmGHfzmu5lecqX466qGn+D5deX94/OPM/35+jL38tJn8lfv8QJvnxa/K8/bT5jqtSlNxeArmi6WkFdtDU9v+7naZdXZyEfk5dvz9AsQ2/n8bOl61evHDfdv3x0+E5K6fCt5zQEYEpvoi+llMrwy+rCr7zvuxd7d6KHYF/DL07wATBa3UkM1e1NcTcq0roQccPURV3HQ29YzI0KvfL+5XS8epSWai/W3KQ67StVJ3+jto+SA7GMxnJ7k67EZR9O5hB8AH00Kq6qS6bjvkbX461Ox46zO6mp3s7jZ0sppXT96pXjHHF5W/Ux+XHl9jL6UkppnPDL076U6id+J6UMxKbbpTIKmx7XhRDsktMZfH18IwNimeRC7+O8d0V4n8sBtzLY7lrMNdk+d+Ps+n++elXGXtMkrs2Eru7n6va2z8smmfalNHziN29tonDYlLHNFLHNlLFPy7vdC76tjd3Wx+EB9F3d5UaalloXqTzebdhjqspj5CYNtw7H3t1nq2fe+9kv3wqqpsleGXhlhJVTu/J7m98/Kv6a5Glf1jYAy4lftn905nmXYrCNNsFY3h9hWti94BvHFB9W3cjHqwGztrWxmy5/8Ob9q5xiPfjzQVoZbKdHm/dGBtIipl7Txtoc9vfus9UzH145eD1se75999nqmZRSytu3z904+8+//+34wysHr6sTusZfuJLSeyn931Jrddm1aTpX3d52infSqgFYGhWDOQLrYrCNHIo5GqsnjZTbFxWVbY8pLC/c3LVj+paOj1v9JwIAgJ5y4WUAgOAEHwBAcIIPACA4wQcAEJzgAwAITvABAAQn+AAAgvsvWY/i5wt5OqUAA
AAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "font = {'family': \"Times New Roman\", 'weight' : 'bold', 'size' : 15}\n", + "plt.rc('font', **font)\n", + "plt.figure(figsize=(30, 3), dpi=200)\n", + "\n", + "n_bins = 500\n", + "\n", + "alpha = -0.03\n", + "\n", + "figure(figsize=(10, 3), dpi=80)\n", + "plt.hist(hlff_farood_score_list - alpha * llfs_farood_score_list - 10, n_bins, density=True, \n", + " weights=np.ones(len(farood_score_list)) / len(farood_score_list), \n", + " facecolor='#FFC690', alpha=0.9)\n", + "\n", + "plt.hist(hlff_nearood_score_list - alpha * llfs_nearood_score_list, n_bins, density=True, \n", + " weights=np.ones(len(nearood_score_list)) / len(nearood_score_list), \n", + " facecolor='#FFDEBF', alpha=0.8)\n", + "\n", + "plt.hist(hlff_csid_score_list - alpha * llfs_csid_score_list, n_bins, density=True, \n", + " weights=np.ones(len(csid_score_list)) / len(csid_score_list), \n", + " facecolor='#BFEBFF', alpha=0.7)\n", + "\n", + "plt.hist(hlff_id_score_list - alpha * llfs_id_score_list, n_bins, density=True, \n", + " weights=np.ones(len(id_score_list)) / len(id_score_list), \n", + " facecolor='#90B1C0', alpha=0.6)\n", + "\n", + "\n", + "# plt.xlim(-160, 0)\n", + "# plt.ylim(0, 0.1)\n", + "plt.axis('off')\n", + "\n", + "plt.savefig(f'./ours.pdf')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Expected Case" + ] + }, + { + "cell_type": "code", + "execution_count": 182, + "metadata": {}, + "outputs": [], + "source": [ + "x1 = np.random.normal(loc=0.0, scale=0.5, size=10000)\n", + "x2 = np.random.normal(loc=1.0, scale=0.5, size=10000)\n", + "x3 = np.random.normal(loc=3.0, scale=0.5, size=10000)\n", + "x4 = np.random.normal(loc=4.2, scale=0.5, size=10000)" + ] + }, + { + "cell_type": "code", + "execution_count": 183, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnwAAADFCAYAAADOty/CAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAxOAAAMTgF/d4wjAAAM2klEQVR4nO3dPYsd1x0G8GujFIIQG1uVu4BxuS6SxoVJquAqXyOlPsuW+hpS4XZdbaPCGASKi0gQWBIwKJ0gMlEKeazZ2Zm5Z97P+Z/fD8S+3Lmzc1U9POc/Zz54+/btCQCAuD48+gIAANiWwAcAEJzABwAQnMAHABCcwAcAEJzABwAQnMAHABCcwAcAEJzABwAQnMAHABCcwAcAEJzABwAQnMAHABCcwAcAEJzABwAQnMAHe3lx9fToSwCgTgIfAEBwAh8AQHACHwBAcAIfAEBwAh8AQHACHwBAcAIfAEBwAh8AQHACHwBAcAIfAEBwAh8AQHACHwBAcAIfAEBwAh8AQHACH+TmxdXToy8BgFgEPgCA4AQ+2Iqmjg1c/XR6MvYzQB+BDwAgOIEPIBCNH9BH4GM715c3R18CACDwAWRLWwesReAjBm0iAAwS+AAAghP4YEtTt2axlQs91ljatTx82+Xj7x4dfQ2wJ4EPACA4gY8ymNEDNqDpoxYCHwBAcAIfQDDm9YAugY/jWKYFgF0IfHC05s5cd+jCrszvUROBjzJpBwEgmcAHOZjb7mkFi7F0rm6tuTzzfVAngY88aOwAYDMCH6TQpLGSpmE7smmrseVr5vXM7VErgQ8AIDiBj/VYll2PRrE6Q61bjW3cUlo8uEvgAwAITuBjvimNnvYPBmnxgK0JfAAAwQl85K9pB7WEADCLwAcAEJzAxza0cbCbvhnAlP3+zA5CPQQ+WGLJ9il7bL1iexcATgIfANwydx8/+/+RM4GPdHOXacfe130t6lKwpo2JxpZpAaYS+ACoSmoTp7EjEoGPbUVt7ObS9AFwAIEPgCqkNHZjxzSvXT7+7pH2j9IIfKzrXKNXY+O3RavXPafmsCpm+YCpBD4AQlizdZt6Lo0fuRP44AgauWpMbeO0d8AWBD6mWWNJdugcNS73ArMtncmbcxyUSuCDkmgGi5Ha1M1t9LZoAofOmUPrOCeQCXHwnsDHOqa2c+3jp7xXCwgAkwl8sNTRrdvRfx8ycESbp0GkJPeOvgAKpGUDgKJo+GBvUxs5DR6MWqtpMydIZAIfAEBwAh9E0m4DNYP8IuUu2xzuxO2jQYN1CHwAAMEJfPRbcmPG0ps6cr4p5MjWrPnbKdeg3dtVLu3YGtdx1GeZ2+QNvW/K+dbcwHnofZpKjibwAQAEJ/DVLuc2bcvHuAFZWtqE5dSk5XQtIPDBGuYuoVp6BWAHAh95apq5sYbu+vJGCwjxld76Hf334XQS+OCdKU1byTdNvLh6mu21scjUmy3ax+dy0wmwHYEPgCK0m7LcW7PU68v9cxCHwMd555ZVj3buGnK4xi0MNXWpDaSmbzOpjVlJzVpJ1wrcJfABUIwcG7Ecrwm6BD7ei9qEzbGk/dKcVWtps9f8PmJDCBxL4AOAFWn8yJHAx372aBDX+Bs5NXR7XUtOnzmgo5o4DSDQEPgAYCXaPXIl8MFcW7dia55fgzdbjS1ZjZ8Zort39AWQmevLm9NXDz+79XNucrwmICSNHVFo+GCOvsbM3nbVidyERf5sUCOBDwASafwolcAHbbk2dLleF1Waul8gcDyBL7Ips25L5uLM1AFA1gQ+SJV7y5b79ZElLR3UQeAjnqZx1DxCEczFwfYEPoAJNGJAiQQ+GGOZFFiRNpOjCHzEXPqM+JkAYCaBD2pms+hJli7nHr0cPPXvTzn+3LF9zZa2K43/J9bg0Wq1itiARfxMALACDR8Au9Nawb4EPuphuxYAKiXwwadfPDj6ErLQzPKZ6WPA1jOI3dYvWgvY/jzRPhv5E/gAAIIT+GqTspxZ05Ln0Gd98/pi5yvZjsZuU03r1f0aVd/ni/6Z13au3dP+sQWBDwAgONuyQCN6Ezbl8724enr6/Z//+OtXZoncfM1tNLVX7/XN9D3865/+dtwVEZmGDwAgOIGvBteXN1XN5fU5126NzexFb/6gMFpCmE7gAwAITuCrSfSWb2g/ve7vU/fd+/Hb/y6+JkKKPJvXVdNnhcgEPgCA4AQ+6mIebx7/b0zUbgbN3MHxBL7o+pZxoy/tAsUQBmEfAh/wnibvlrG95sy2ASUR+ErUNHSaunSRHpUGABMJfAAAwQl8DIvcIL64ejpr+VJTWC1LuGnGbtYwrwfHEfgAAIIT+Khb6ibMVEGLN8/L589Wbb5rawJr+7wcQ+CLZqvn5kZe3m2zZAtAQAIfwEm7t7am9Wu3Vx998uCb1PdrvW5r/3/4v2EOgS+SWlq4rWn5AAhG4AP6NXcxV7AZc7fd0/b1mzur19fsTWn7uEvLx1QCH3Vp2jstHgAVEfiAeYI2f9q9afpaP+3TeuxlyFoEvpJMmdEzz3det+XT+gEQlMAHzFd4y6fNm27pnnvt2T1zfHdp8NiKwAcAEJzAV5pmqdaS7XmeorGvQts+Ld86Xj5/duH/EvIl8AEABCfwla7Wpm+v9s6NHLcV1uJpnI5hNm8d5vlYk8AHABCcwFeClBav1qavq2n+ul+738+h7YOzlt7Fyzt97Z49+VhC4AMACE7go2zuxD3ei6unpc32sQ3t3v5SmkA4nU6ne0dfAGTPUi4AhRP4ACAAzR5jLOkSg6Xd7bWXbQtawu3bmiVluxZbugCRCHwAAMEJfMSj7aNjbssHEIXABwAQnMAHUw3dtdv9fa1399qmhR5jj1vzKDbYnsAHwCT22yuDu3ZpE/gox9Bsnpm9fRXa3pnZK4O2D7Yh8MEcb15fjC7Z1rqcSxgvnz+70ORBHAIfx1rS2mn28rbzvn2pDZ6mbz8fffLgm77Grv07jR7sQ+CDPWj8CEgDmAezeqQQ+IAwtHcA/QS+XF1f3mxyLOtqmrvu175jIDN9DV1qa6fdK48msG4CH3kzp1eWsVm9nffn83SN8tirD7Yj8AFwOp2WNX5A3gS+Uli2fSfHxs+S7bC+Vm/llk9rF0O3wdPowboEPoDKtFu7lAZPy1c2s3ucTgIfR0pt67rH5djyMc3Ke/Rp+eLS9ME6BD4AzrZ4Wr6yaPXoEviO1DeXZ1avfO2Zvu58n3k/AA4g8JGfZsm2vXRrGTeeBUu5Vz+dnljGrUezrGt5d9gajZ5WMDaBDwAgOIHvCJZysbSbZkYLqP0bZ689qJPABwAQnMC3h+vLm1v/xo7r+xrF0Bxe38xe7WpsAHd87FqtSntOrpm95Zq5vO5X6iPwAUBF+kKfIBifwJeDaE0e89XY7LV1NmT+/v6Xn48dblYvJs0erE/gA4DKXD7+7tHcVk8bWCaBbwvtxm6ovUud09P+xTW1z
YvY/jWN3rn5vdTjqEJ7X76+NlBDCHcJfAAB5XLjBXkbauu0ePEIfOzr3B257tSt2vf3v/x86lzexz+/+nqr66E82j3oJ/AtFXULFY4TcemWQ2n7SNVt9s79TDkEPuBwd+7Gbc3qff+vV/8ZPI5qpczunWv7tIHUROBLob1jDXOaO20fv1ja0mn5GKK1q4PAxzJrzdyZ3aOlr8lrN32n0+3Zvb45vnOvU4/u3byaPWok8G1JM0ifdmuX0uD1Ha/5o2Nqg6fxY0jfY9iW7NtHHgQ+1vfpFw9+/Tf0OgxYu43T7sWnsYPzBD6AnW3drmnv2IPGrywC3zk1L8umNnFjbd6U89TqzeuLTZdoVz732nfKuvOWXAw9uQMiEPgACtU0eWONnraPLYy1e5q/PAl8fVJbve5x15c3NmJmtrEmLvXGDTd1ANBD4KvZ0mXYKUu1lnWLlrLsmro02z5u6Pux91z9dHoydiNGymvdY3K4saPdxM1t5brvq6ndS1mK7VuybX62lDtP945eT+bI172jLyA7Yw1d87uvHn7W+3s4UrfVO9cY/ub+D1tfEgB50PBFtmar1j7X2HYrmrxqTGnnmn8px587ZqiN+/jnV1/Pbf721J27O9fKDbV0qe+PJrWJW+Oxalq/d861dEOva/fyUkfgG2vrhl4/dw6tHqXpzvcNzfstnf8zPwiQnToCX1RNmzZnFq/93r7vp5wr5RopQu8jzRLau/bPP/z2D/faP6eea46xtm/o9e4j186dY861bPXUi+jt3VH65vr6HsXW1/h5ZNt73SdzjB2X0v4NzQUyj8AHABBcfoFvaKl0bMuTKduotM/Tfe/c7Vj21tfELb1jdmhGz+PRNtduxG5Jfc5ud+Pm1vftc3e/v9PEvXl98eH/3vyufZ65DV23+Tva3//xz/vd3zWNXPNauxXsNoT//vGHv8xpA18+f3Yxtlfeuf3zNHrLLW3d+tq9sUZwauPXvHfs2NKbw3ZT19cC9t3ZO3T3b99rQ+dObRz7rnWpHFvJ/AIfAACr+uDt27dHXwMAABvS8AEABCfwAQAEJ/ABAAQn8AEABCfwAQAEJ/ABAAQn8AEABPd/6kWhTJXLAJwAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "font = {'family': \"Times New Roman\", 'weight' : 'bold', 'size' : 15}\n", + "plt.rc('font', **font)\n", + "plt.figure(figsize=(30, 3), dpi=200)\n", + "\n", + "n_bins = 500\n", + "\n", + "figure(figsize=(10, 3), dpi=80)\n", + "plt.hist(x1, n_bins, density=True, facecolor='#FFC690', alpha=0.9)\n", + "\n", + "plt.hist(x2, n_bins, density=True, facecolor='#FFDEBF', alpha=0.8)\n", + "\n", + "plt.hist(x3, n_bins, density=True, facecolor='#BFEBFF', alpha=0.7)\n", + "\n", + "plt.hist(x4, n_bins, density=True, facecolor='#90B1C0', alpha=0.6)\n", + "\n", + "plt.axis('off')\n", + "\n", + "plt.savefig(f'./perfect.pdf')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# T-SNE Plot" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "idx_list = np.load('idx.npy')" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "color_dict = {0: '#90B1C0', 1: '#BFEBFF', 2: '#BFEBFF', \n", + " 3: '#FFDEBF', 4: '#FFDEBF', 5: '#FFC690', 6: '#FFC690', 7: '#FFC690'}\n", + "color_list = [color_dict[i] for i in idx_list]" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYQAAAE+CAYAAACN7GfAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAxOAAAMTgF/d4wjAAD2sklEQVR4nOydd5wcd3n/398pW6839bKqlmRL7rZssBYwGFNE772FJBRBIAGSXyAJEJJQLwkECAmEXkIA0Q02K7AtG3c1W3XVdbre9rbNzPf3x7N7u3e310/F9nxer7O8OzPf+c7szPN8n/Z5lNYaHz58+PDhw7jQE/Dhw4cPHxcHfIXgw4cPHz4AXyH48OHDh48CfIXgw4cPHz4AXyH48OHDh48CfIXgw4cPHz4AXyH48OHDh48CfIXgw4cPHz4AXyH48OHDh48CfIXgw4cPHz4AXyH48OHDh48CfIXgw4cPHz4AXyH48OHDh48CfIXgw4cPHz4AXyH48OHDh48CfIXgw4cPHz4AXyH48OHDh48CfIXgw4cPHz4AsC70BHw8udG6fcergNcDR4D3btu6JXeBp+TDx5MWvkLwccHQun3HG4Cvln11I3D5hZmNDx8+lNb6Qs/Bx5MQrdt31AAPAisBr/C1AqxtW7d44x7ow4ePcwbfQvBxztG6fUcV8BkgDnQAe4A3U3r+irEsBwgDqfM8RR8+fOBbCD7OEVq371gOXAcMAv8LhKZ46Blg7batWwbO0dR8+PAxDnyF4GPO0bp9x+XALwETqGbqyqCIXwLv3rZ1y6E5npoPHz4mgK8QfMwZWrfvmA98G7gBsBHrIML0XZMayAEbt23dcmBOJ+nDh49x4dch+JhL/B54GhBEnq0aZhan0kAA+ODcTc2HDx+TwVcIPuYEBetgNUqhlJrtcMXnMj3bgXz48DF1+FlGPuYEsXWXPS0QDBIIhdGeR9fZM5w5dmQ2QyrgF3M0PR8+fEwBvoXgY9ZIdLKkuq7+X4PhCEopDNOkoWU+VbX1sx36HXMxPx8+fEwNflDZx6zxwz2n31PX1PJJwzSHLU7P83DyOSzLRqPpOHmCrrNn8LSH9qZcd5YBXrBt65bbzsnEffjwMQK+QvAxK7Ru39FoBwL7Vm+8qtmyLMUU4geDfb2cPnqYzNCU6s96gddt27rlZ7Ocqg8fPiaB7zLyMVssyedydvup4/1TXVpU1daxZtNVrLvyOqrrGyY7LAq8dJZz9OHDxxTgKwQfs8UhoMswzdA0XEEA2MEgy9duUI3zFoy3i0aK25Kzm6IPHz6mAl8h+JgVtm3dMgg8e6XZ/diV2V16Q3ovYZ1BFdJPR/+NhlKKRStWU988b/SmouVwBOFB8uHDxzmGrxB8zBrbLtMLr60ZWFLtpXSD28Ol6d0wTmxqvBqFeUuWgygBr/Dv3cBfAut9XiMfPs4P/DoEH3OBKwA8ZXhaY4S8LLbOk1eBijuPVgpaawLBIOuv3qx6Os7S19WRGxocWAe8advWLflzPvvziWTiWcD7gT7gHcTi7Rd4Rj58DMNXCD7mAg9qUKZ28h6GlTFC5JU95YOVUmitsWyb5oWLVfPCxcHTyUNOZ9vppcDBczft84xk4ilIsV3RMn86yUQLsbhLMrEQeDqwh1j84Qs1RR9PbvguIx+zRqI6bj0WuoQBoybQY9U7e8KXUp5+GnFT1Dk9mNqZ8pgtS5ZFkfjBEwlvQd65oj+tAThGMvEgcBL4BvAQycR3L9D8fDzJ4VsIPuYCXzlrz1dn7fndQBNSUKaAwPzcabUyewQNOMrmkcgmssaU2LA1sJgnVobRscK/qvAHsKjwV45XkEx8h1j8J+dtZj584CsEH7NEohMF1NlezmlyOmu0UqrdajE9ZdpoTSx3FPCw8LC1w6rMAfZGNk46bveZM0OUBOgTBf8APAe4Zgr7fo9k4hXAU4A24EvE4oPncnI+fPiVyj5mjbvbs5/blH7knQEvZwKkzCiPhDehUWwevJsApbiwh8FDkSsYNKvHjFP+LLquk7x5vr3iPEz//COZeAC4cgp7aqSdqAPcCbwdaAYeJRbPnbsJ+niywo8h+Jg1rk/dc0fYS6c0eB6KqJsi6qVAKU7bC4f304CLQcjLVBynvF7Bsuzpdlm7uJBMbCaZ2EUycZJk
4u9IJspTq16JuNUmg0IqtaPArcAJ4AHgAZKJurmesg8fvkLwMWsY6G6FdjXKKEo9R4k38kRwKWkVxsXAw0Qrg4EK1kEFPH7dmcmEDXwPWIo0C3oX8LzCNgX8C1NTCCBKwUYqto3Cv5ciAWofPuYUj9+XzsdFg3vOcteiloaBBaqvXgNHA8vIGGEAPGXycORyFuVOYuJyxl441aDy4/nZrCv8pRDDKAgsL2xrBJ5R2OYx80XZVFxOPnxMC4/nl87HRYJ729Xz1y5atehoQCwAV1mU+0dyRpBkaOVEQ3QAtUjbzCLuOhdzPU/oQlw7lyMr+iyQKGwbRDrBBSiln84EcZKJHcCbicUPz2IcHz6G4buMfMwFNmWHhry8snELrqJpJCs4wBrgEWTVnAe6kQDq4w/iLvoyUA/sAf4HeD6x+G4AYvEMcm0RRFnMFAuBm4CHC+f04WPW8C0EH3OBB84cT6Zq6hvqil9Mo6+yBWwEng28GQgB34w3cXquJ3me8BtEUBfxGLH4PaP2mY9YCSYSH5gqNDD6xlYBK4D905ynDx9j4FsIPmaNbVu3/MJz3Q96njf1UuQSNGDEm+iON/GpeBMfizdxdI6neD5xfeHfoom0tcI+NyIxhumu7BWV3Uw90xzHh4+K8BWCjznBnz/7+i+ZlvVnSik9DetAA/fz+I4XjEYvI1fxp0ZsTSZMxBoafZOywBAwGZnf6OM84MckE+M2lfDhY6rwFYKPOUO8ia8Ar0dSKjUTB0018EHgxnjTpELw8YStlJRCG/CiUdttJOuo/N7owvcu41sB48EANgM7RtU6+PAxbfiVyj7mHIlOTMSvfT8QprJr5CfAi+JNs8q0eXxBgr+twBuQoPJcwgEaiMXH7x0hCiMKpIjFnzz33ceU4VsIPuYc8SZchKl0L+IGSQM54F7gr4FNPJmUQTIxj2RiN2I5/RlzrwxAFML4SSLJRDOSCtsBHCKZWHsO5uDjcQ7fQvBxzpDopB54B1Jj8M14E49c4CmdfyQTIcSFFDzHZ8oBu4CnEItnK8zjV8AtlNxRu4nFN53jOfl4nMFXCD58nEskE1cD983xqJXSTweQgPSziMUfGDWHeUi/hXILIg28ojDObYX6CB9Pcvh1CD58nFvMRYtMF8kmAhHkKcQiWEopCO0V/r/S+bYyNlDtAd9C3FcuycTDwBeAbxKLu3MwZx+PQ/gWgg8f5xrJxLeAV5d9oxGBPhU3kga+Dbym7LsvAh9AejM/F1iNxCf+DrEUNPCdwr9vBl4HXFs4X1GBZJEiwNF4ALjGDzo/OeErBB8+zgeSiSLBXRvwVSCOrM5tJk7uqKQ8skCEWNwrjG0jfRIeLvwLYkn8ArEOLEouJo0oj/AE53wGsfgdU7ksH08s+ArBh48LAUkBXYqs8m8FioVlozOQivUc5UrDA8IjmuQkE+8FPjNHs/s/JGV4AOnU9kSqE/ExAXyF4MPHhUYy8ULgvxGhX81I4e8gxH9XlX33ELH4SPrrZOI9wGfnaEblQuEQsNZ3IT054CsEH08eJBMtwJ8i7pKvE4s/eoFnVEIy8XLg+Uim0EuRArI8woz6Q0Rh3IBkLL1yTGppMlGD0GRUnYPZ3Uwsfvs5GNfHRQZfIfh4ckB8+DuRCmqFuEOuJxY/eUHnVQki3JuB4xO6a5KJaiBALN5V+PwD4MXMfcGpH1N4ksCvVPbxZMEqJKg7hKRtBoH3kEwsu5CTqohYvJ9Y/PAkyuBPgNPAWZKJYyQT1wIv5Ny8034B25MEvoXg48mBZKIJeJRSb+Lasq1/RBrYdwIfJRY/NXaA4XEU0tNYIdW+5/8FkkKzA4h7qJg9lOLcuItAMpaq/fqEJz58heDjyYNk4rkIudxyxnYrSyMpoApRGC7w58CvkIY3J4HLgI9RUiYXJmc/mVgDPMTIOoJzZe0XC+JuIhZ/ItGU+6gAv1LZx5MD4m/vA/4F+I8Ke4zOy7eQVpgZSjUALiPfmSuB+0gmViOZOYOIQkkivEK/BX5yDlbWh5G00KfO8bhFuJQUpkKsD59a+0kA30Lw8cRGMmEA/4Dk+1vMro/xdKARQfo9YvG3zvnoQpr3JeCVlCybyVAu6CdCHyUFOQTsAF5KLD6Tjng+Hkfwg8o+nujYhlBuBzl/yqAIG3hZIWtorvFuhJIiwNRX7yZS1zARssA+4JNIwdzz8ZXBkwa+y8jHxYVk4iqkCGtXheb0M8FL52CMmaLIGTS3TKISVP4IM3PjjPfOZxCLxgO+QCz+zRnOzsfjGL5C8HHxIJl4GfA1RJDmSSb+lFj8e2Xbm4AG4OA0ArmPApvJDcLAGdAaqlogVDe3cxecRVbhxcGHgD8bQTExNyhmF1WiwZ4IHuN7BQzEoskCc6GIfTwO4ccQfFx4iD/8VuCblLh8iiRs9wFvQrKDnlfY1gcsIBZPT2HsG/CcO+ncr9DFhBkFjavAGkH2OXHHsfFRFMoaCfQ+B+gC5gMDxOKDMxhzYkhc5MfAsxlJXDeVuYIoqmjZ9y7wG6Su4UvE4n+cm4n6eLzBtxB8nH+IQHsWkr7ZCXwfWfmXQyGBzZuQFMtyP3wt8AOKCkIyiP4aWIMIym+WWRCvwM0pdEFuKyVWgpMZrRBm+i4UhXEWoaH+FLH4G4EzMxxvcsTiHsnES4CXINe8CqHXnixGopH79DOEDvvPEKvgs8A3fL4iH75C8HEh8B+Ib18jK9VKvPzlqBSUfQbJRLFX80FgJSKc40hNwf8W9mvDDLoYponrFE6pPKxwFrEKIsw+2Jym1CZz3SzHmhpi8TzJxO8QhXobQnNdO87eXmE/C7iDWHwv0u/6q+djqj4eP/CzjHycX0gc4FWI8GpkcmUAJQrocoS0/NVoCUJHEAEfRqyPIj6PYd5DfcwhVOthhQ9Tt/wYVjCIZOh4zB4GJRfMT+ZgvMkhVBW7gZ8DdyOKsRI00ENJ6Z0995Pz8XiFH0O4mJBMGMNNT56oSCaaEaE0q0KnSk9t2YA5YCOx+P7CORUS6B0qnLu4ktbAcWC2fEb/i1gIfwT++5xTPCQTr0LaX052D4cK/7oImd/7icW/cy6n5uPxDd9ldDFAmDj/C3gOyUQK6EaqUd9OLN5xjs9tAFcUPj10HhTSBs6BMhiFol/8OQAF33gPycRPGetWMRD//1TaWVbCQeAtxOL9Mzx+ehDl9hUq30OPUje0FKIEIkjM4HV+jMDHZPBdRhcayYSJcN2/BGmOshjYCLwI2FMQ2HN5vgUkEzeQTHybZOI0EvxMIG6H3QXldC6RYkoyfVZQiDuoBMlkenaF/X4FvBb4NdNzH90B/AViicxcGSQT7yaZOEoysYtk4uopHGEyvvJyiMWjiMXiFPbzgPt9ZeBjKvBdRhcCIuTXIkrgL6kcNC3iamLxB+bovC8Gvk7J310pLz0FrCEWPz0n55TzPhMJJFcjdAt/U+G8U8HwfMd7agvLZg3cMKKwLZkII+mq5WmafUAjsbhLMhFACOyamNyCKc///wmx+AundxnDc3oB8KOyb7JAw6TptMlEAtgyzryuQ6zLzyCsrL9
AGFz9Npg+JoWvEM4nhMLgG8AzmbjJeTlywHJi8dmnMSYT/YhQngx9wNI5cYNISuhBZMVezPWvZuruSo34wI8hBVOvKX45HhS8l1j8cxXm8i/Ae5FVdhq4tpBxU9z+NoTQbrq4bka5+8nE14DXU7ocA3gqsfidkxyngNuBp1XY+nVi8TdMey5PdCQTyreSJofvMjpXSCYCJBOXk0wsKnw2kNX5s5m6MgARpK1zMJ+lTJ0vvxY4RDKxedbnlUyiMOLbdhDhPjThESPRB3wYaSd5S/FLReVl/Blrfl9FZQAQi/8VcD3wXKSwbe+oPX7BzNxZl8/gGJAgNJQux0XSQSfDQmSufaO+18y2/iGZiJJMbCCZiEy+8+MAyUSIZOLrQAfJxEMkE+sv9JQuZvgWwrlAMvF24HOIMM8hmSzzENfQdNwRRfyKWPzWWc7pGkoCaKrwgJcTi/9wFuc1EbbMDZTuRxQJ/E7l/BlK1ctjUP70uij9QPSanrQRaYo3zTBOkUzcDjy9bPj0qPOfRX7LIvLAamLxYzM839eBlyP35Z3E4l+fZP/lSB+GYiFfudtvPxJ/cpDn77WI0njdpFaHjP1mxKVnIq7DG4nFd03nci46PLb9H8ln/gLDdAjXOyjjILH4NRd6WhcrfIUw10gmtiABx5lYX3ngPQhdc2PhOxd4OrH472cxJ4UIi9UVtjqIUAlU2AaS6349sfh0VvWjz98E/B6IUWLnnBN+fQ14KJ1WETcZiqW7rKbWeBN/O6tBk4mPAW9GUkn/CmhHqnqPAR9Fit8+idy39xGL75jl+Z6PxFX6gNcQi3dOsO8HgU+M+vZriBV1klhck0y8B4khFO+xgzT3+VskxvAwIvj3E4sfL4xrA/1IILq4KHmYWPzKWV3bhcTOf12JYTyG1hYoCEShbrmDUkuIxdsKVvvfAG9BajVeTyy++8JO+sLCTzudezybmSmDLLCJWHw/ycRXkGDzEoRb5qFZzukTCL1BJUz2DFwGnCSZeOEslNLliJsjy9QK0aYMBTwUuXL9oFl9I3AKyRaaHWLx/wf8v1HflltXtyPNcWaPZGIrQrdRFN4nSSYaJlDAlTKhLicWP1H2OT5qu4W4oooptiuAFwO5AoHg1xC33mir7VzQdp8/hOv+hUy/hTKEriQ3CNq1UNZxkokrkQLJvy7svQx4kGRi+YQtVJ/g8BXCXCKZWIK0XZwu2oHnDhdSCTvmx+doTkHg7YggmSlFQx2SGjueUpkML2Nq7rIZ4eqhB9qIxf/rXIx9HvAhRt6XIJJ99o1x9r+jwnePDf+fUGP/EXjBqH2Ki5TylNUg8IVCttM+JPX4KWXz+czk079IkUzcSqD6FjL9ogzQoAz5E8W3G7EAy2EhSuJT53WuFxF8hTAXSCaeh7yAz2f6q6oMkuY5OkA4V/AQV9QAJVrm6UIBTYVcfgPxq/8JwkLaDrxj1Aq1BHFFvIyZK4MME1sVPcTivaPO2QB4Y74fuY9CLJcIcN85oKieKtoqfNc+wf6PAkcR61EhK/5/KFzPZ5AFicXUu6OFgRcW/jTCLnsU+C6x+I/GPepCQ4Lef48UHx4B3kosXk7LcS2hOpfcgEumz0QZULukqBCKqKs08jmb8+MAfpbRbJFMvBT4LvBWRgYbh/eY4OgB4E/OoTKgkH/+HsR3P5uAURWSHTSICIwvIv7o5wNHCnGC8TBTZeAhAu9jCDnbAPA9hP00i9zbS4f3fvBrBru+8zP6T54h3XOaI7/7wARjfwpx/WwHflWoU5h7SH3DRHgD4rsv4h6ErK4yYvEUwtX0bYQ36RZi8UcR9tI/odROcybWoAKuKYz9MMnEO0kmnjKDcc4tkolapGf1+4H1COvtroJSLGIPSkHtUoOWDdC8HoKTrtXagZ3nZM6PE/hB5dkimdiOCMWZYH3hZT73SCb2In78NLIyKhZozZWVmESoNn5T4dzvBT7N9BTDIJJ5E0CC7N8dYYVI9tIXEX/5GeDf6DpwE072HcP7VM1PE22+ctgVVzp2CeIyyFAKqL99VtlUo5FMXI/0GIgirokbiMUfm2D/q4B+YvGDMzzfC4EfMvtFnkbcVa+i9Gx8llj8fbMcd+6QTLwJCYqPjnmsRiz1FoRS/XYkhboo5Kby/HnI8/QbpIr9B094frEy+BbC7NE7i2MvO4/53icoZfcU3SNz6TJcBnyXZOL1Fbb9J+LCqIQik6nW5R+gSsvLrJDA3+9JJuaRTLyEZOIfkfv+ViSu8VTgu1rrN2pAK0MEQKbPpjIldCXBMDfxDak/+Q/gTsSq0ogCnljZxOIPzFgZCCpZpzPFaxkpbN89avV9oVEsViyHg2Rb/SOSGXYf4mosZkxNdf4GsAh4I2L59xaywJ4U8BXC7PE3jC/sJsNXgNvOmbtiJN6F1EMUaaInqgNwmb57KY28dG+psE18+iPk/fBnAE+LC2gEPJThySo+gwjXf0Eqif+CUUV2GgysUBUohUY0gml3IimWo3ECKRK0EetgL1LoNRf4M+AVlN6t4r8tczT+WEiznDch93C2TKuK0pyLgtQCls9y3LnEvYx1iZ1CSBotSplYc0FvXg38hGTilkn3fALAVwizQTJhIZ27ZnofA8AllAqhzh1k9bkJaSQzmVUykf9ZIy9fOYpBawNpwzgap4AHKQkrB3gd8Dvk5b71rNUygpZZlnVaKanHmIcEP28oO37sxKoX0R9sybnKIBNoGAB1fcVgsVAYbEMqn18MPGtWdRYjsRa5f6MV3A/maPyREHfcD5AYQhgRgC5iBXYxN/0e4OLKvLmCsVXay5Dn2ij8FZXZbJsfURjn23MwzkUPP8todvgYElidKVxGunDOLaRoKYcI8BATm9HjbfMQ+oc/QTKqfo3ch1XAIWBsIFfI455dOKYe+FaBNkJesmRirYnX22E25ZrdzkCFkyvEF1/silaR/K3Tnpfft+TmLkQw/DzexPFxr06Uwv3jbp8IUtCkx+HG+SXwSkq/aS9SS/J3MzrXxPNoBd5d9k2xavlSoAMpVntnhSMrVcNPhosp2HicqVW6zyVqnwx8SH5QeTYYSXMwXWgku+RXSHXquW2qUo5k4p8Q4RxFBNcXEMExlXjGHcTizxg1nkJcOIPTfmGSiecCP9SykjOpIKjGkVwZRBmtBfIpFb73vui1t6JUBOkn8dx4E/srHzoLJBN/ifioswiJ3tiVvxSbPQ8h9fs3YvHMHJz3XYg7qgqhAvlbKmewDRGLR0kmfogkO8yF4Mwh1fJ3zcFYc4PKFdtFzEThTYbjxOKzbaR00cNXCLNBMvG3SAbMVDGACL1ehOXyDPDYec9iEAF+I5LL/pcI/81kpnUPkn3yHqTS9XVI+un/zGrVlEycoRQQVeUDTfBGe4iV8A1i8T8rfpnopAa5pqPxJlIzntP4c70eaVlZzh+0flT++9wjmfgy8LaybzTyLI3Oo9RIDOdBxPqZCw+ABu4mFr8Y00//HXjHpPvNHieBp8yYr+pxBN9lNDt8Engf4zc3Hw0HsSj2zaHPevoQAX4nycRxRICWMNQF6W4wbK
hZCGYAiqmIsfg2kolNiN+/mF//eqZrJUkbzUuQ4G4LZbJ/isu6op98RDA+3kQ/U2MLnSkWFc5bjGGEEGV2rvsUv3bUZ0XlAsi/RRTBXCoDkB4RFx9i8XeSTPwF0uBpE3LNxUDyTOJ6Gins+yViiR2ZtDfFEwy+hTAbSEXsKSpX0lYyWzXwtouCZkH84CPdVNkB6D3GsBwwA9C4BpQCKUprAL6JUCuU53bPm3KrT1EoP0de3CqmRwUO4r7oR1765xKLTxwHSCYW4+Y/jnbnYQZbWfG0X46zn430WpgH/JxYfE+FfYq0EHWF8x9GGEHHZEjNKZKJNBNXa7vAfxCLv4tk4jtIDGMi5Jm6K0kjAvIe4GfE4g8V7lUYGLgofOoynzcgMZNaxJ3YxNSVQh5JMf3ylFhhn8DwLYTZYaIshtOIj75u1PcX/gUCiMU9komRgiGfZpjzRWtwc6A9UKZGBMAK5OUZDXnxhJo5ASxFfOxvL6dzPnti3xvqsD9pk69T0KUKQq7SDVGAzg7A4FlRSFULUIEICOPoh4F7iMWPjjgomXgtUr3ah6SmPkKq4y4G24TmwY4+jUP5m1j1rHsrnPLLwFbk93wfycTNY6ifY/GzJBNxxF2WAf7rPCiDa5icEFADry4og3UT7ulk0vQey+I5NQRrDWoWFRV+JbiI0tuMcBy9q1Bj8U5Eof+aZOK1s4qRyMLEIBavmDk2JUg1/lcKPT+Kc5sq8gjV9+4nuzIAP+10dojFuxDf+mg4iCvmbkauwjuYrEDp/OK1lMtjOwwoUQJoMINQLPKSlXk7ElBNUUrr+2mZD/3HSPqfQoTYV0kmlgF0Hd/1xgan+6s2+WYFtgfNelTMYAScrFgrThryQ9CbRHsOSM/p9ZQFwBOdLDlw+tTHPNR/aFFGm1yM2x4IbdylB88u1SilMdD5oUA+3ff+MecSeokXIb/bEEL69naSie0kE78gmbhpeN9YPEks/g/E4v9S+P3PNW5k4kWEi9B6GIjQ3sNEqaa9x8M4GRPP6SbTq0l3T3TuXyEWUw75zasQt1Q9cv+fiazMZ4Zk4q8RgZwnmdjN7PuHfxSpiL8HiTENTrJ/MbHD40nOYVSEbyHMHr9AqhrLsbeQ4vlWpMR+C2ItVAEfIJn4m4vC1I7Fv08y8VMkyB0gWA3VCyWGYNpQvcBFKRN5cT5NLN6FtMT8d6Q6+DbkJSxi+agzGMDVwLGAzr1FoXGxPAvHMCpYVjmUF0BLz2QnU1i5FrskFywWwwohqa0f1MnE/9tRHd8CPMdTBg4mKJU3tZPSqAalvQaZuiFjaU1bcMkLD3eyNj6QOIi4F/oRodRb+JxFBMSrSyfnf0kmXocIwIOIa2GmDXiWI3Qbp4DfTmGc2xnLVPtRpC7gQcQ1Yhf22Y9kXj0NUcgmYtmVVsxujsI15tHuILnBU0QaV1PZ0t2AZGzVI0qh3L2nCp9nFl8Q7qtyRt9Lgf8upNK+ufB5U+HcIJlVT5vwfoml8PHC+J8sjDMRTiP37WvA/033Ep6I8BXC7PE3SA+E+YXPSYoN0GPxM4XeBs8rbAsjPX1/UqClBsngmLm5XIRQYPwDwlf0DSAxpYBYLJ4eQUsQaZC/UnFTsdDneci1fg0JIhuIe6KcjG0H4nYpwqXQR8DDOAbcKJwDpvZA27gjVoR7w5tULHOEOt0PVqiMzAJQJpiBYmCmKOD+ecPgQ/pA6BL6DYmxGtqzNUZd1ggyYDfQGV1JYyoJWuOYIU7UXGaGvaHWwn1ahyiD9yMr3W8gSuGPlNpiViO/2y8oKYgXAtPvYJdMXILQV9uF+/fvwEcmPCYW311QRp8oHPP/EMvzC0gAvRkp3vsvYHthIXJlYf7vAUamCAerIdMXBS0rczuyk8qNk0AspZch92VRhe0eYhXOBGsrfPcyxB1XyVLYgvQrsBHL5UMFBTAePoD8ji9BOtKV+8U08Ali8b8ZcUQy8VQkXhJBlMWGc0o8eRHCDyrPBUQYrwFOE4u3l31fj3DVF2kLijd7J5JlA9JJ7GWzUgrJRAxZuY5e5TnIg/1iYvEHJji+UiAyhVg1RWSQ1dppRIi6iMXzBWLxDxbGsZGq2VsQk/1tRcK4fPLO6qwRvD/kZVbnVCB/2pr/gaX5E5+ycUyAPqOGhyNXeIAxL38mvyR7XIVynZYx2EHOCNJeu8Gdp/oI4JnFN7v8yT1pzKMrOJ+F+VM4yuJ4YBkZIwzaY2XHH2gZPEjeDHOweYu7mJ7OZrerPLtJI0L+V4V7WIWstuvQHmitMEwouWIUEJxEIJXf33WIAnkKIqBTyGLMAFqmlXacTCxA6J6LC4oUsIhYvH/UflchBG15ymkztJem78TvyPQeB37C/E0fBa6icoLXMWLx5YUFw4OM7R39SaRP9fSQTNQgVs67J9t1FHKUKuI/QSz+ySme7zJKRHcu8HfE4v9SYb/RwfvHiMUnjsk8weBbCHMBSSF9ePhzMlEH/DNispbfY4UI6UsoVbLehLhV7pnFDH5BZZPfQnzqd5JMxAptA1Xh/EEkkOYi7pGbECunGPOIjhrLRlpgHkZWlPnCX6n4SwTkCytN0I49ZcCGtaljf7Sjy67NrwLuPdv0yxq3/0dZI7Su16wDpQzDc1iQb7PCZJUXqPG8hlpsXGMJPZWD91qDUiz2znKKJewLXzpic1W2g/kDQigbzPezoe0XBo1rBjBGkMEp4EeIK+XzaK+Boa4omX5FvuCGlowrhWEWzZYJCwkTnSwGzJsGdkQNsQoCWu65reW+VQOugperZOJ703BBvZFSm0uQ3+lliIVQjqKyKvrJq4FBlHGaumVvJfaGMwAkE69CBH0lWZAAihXu5VZTERP3fx4f/w68dAbHpRFlYANXFGIO1hR6WcSQ6ysuZDaMs9/o4P0TvhBtNPyg8lwjmdiABPn+hMov2eiXSDEbvhlJhRzP5C8iBLyhQJF8FKF+vgv4P5IJuyCMPkmpcK5SSqJC6ClehfQj6EdYTCsKhUQnLYlOnpHoHBlXiC67dnhVfd286P6zgQXBXqt+ONNlfWYftV6/MtBYeIaJO+Yh1YBO98LZPfLXfxo8jyX5k2OsrHC+D42Bpyw8ZWF52UwgP9jMWMvYAEJo/X469r+JgdOBYWUA4nvvP1kManx2vFV9ohOV6OQTCI1H8mhg+cMeqkZDNo8VQBRCo4KAxgh5GP8N/GmlscYgmahCOItGs3dWak60G0mlDCBC8JeIEL6OWPxM2X7vB35b4fgUI2kvPogkUBQVYqJiau7k12AANzOyc9t46ESUwBByjdVI1l4UEfIngHaSiX8v0KGPh1uQ57ofUfrPHGe/0e6hfVOY4xMKvoUwVxB3yZcQX/R4itZFUvh+RckH/XNmwqlTao15A+LOGb2iH41/YmRtRAR5MV9MMvEzxOU0UV2YNMeJxQ8hQctxkejkUuQaA4CR6OQN8SZ+Ps7uI4R4lTsIoFUhGqwr3Us3B31lRaNDHZAfYmD+ijHP80BoHigwvZyMq3VQ9xwJY9pQtwzsiKS4Fu9Nph+8nIIKN8PJekiB3thMp
RLWIa6QYCjfx7z2nbbK99o6VBewq+aZBTJWHA2eMjHxgoig/o8Jxizia0gsp5zSeTux+NiGOrKqfwfweUQY7qmoxKR+5NbCM3ALcr/zwF8Qiw+W7ddDMrEIsUbaicV/NYX5CqShjUEs3oMsIOYz8bNWbIxUW5iPyVhalesRZTGAuDtvR6y8SngUeffswt94fSk2IQul5sIx8Qnm+ISErxDmAhIrOIgE9yZCMevjdQhjowYentSHLG6ey5AV0n3I73aK6bfEHP0SRpAXVFMKdFaCRjqL/X6K53l/Ya4pxDr5KIyrED4E/G9xbv1mTa7Z7bR14bNGFfKEylb0mQpxvvwQZAbGGP0Zu5bdC7eyqPcRHcl1UZXtMpQywHOg7wSZpssIifdOoTX0lRbbYyoLww0ZoItk4tu4zk3ALkzrTaOoK6IU3Hfrz/ySYK4HjYca6jQxbcgNYWYKC23DVm5dzCMQnmofhBcy0jWYA+pJJtZVbLQklt9UK7dfi/QS2ID81l+pMF6a6bqJkom/QvpZKJKJbzMye2s0ij+ywdQKFsOU6K7XM75C+BIS49uKxF8qUbRToKZYPIXzPmHhK4S5wU4mVwZFDABMGOQdi48jboXiC/4oM++PPBqji3gqVVjngddNKfiZTNQsNJde6ZnBSIfd7LjGpDVC+5B88SrA2xdav2dL6vfHPdSzUkY0vDt0qXKMALVOHzVeP31EWNP1izHLRY3C9irXiKWCzVg1C1VVd2chk1WBNsBzsYsGiudA50EqhgbMEEQaM0SbTuB5VXQffBlOxgIWEay9l9gIt9jDwD7luVeE8z24ygRliIWS6YVcmRvKy2P2JvM0rR2Z7VIJsigY7RYJIH7975NMbJxVKrP0n/7zGR9fCcnEGsTVlKXEszQZw+50r6Ho3vwHkonvV2wyJLGtdzP9IPaTDn6W0VwgmfCYOrtiH3DNlLtjSYzgAGIeF9s9djN53GCmqKQQ0siL/W8TCp1kIjToqEcDlrFcocgYQR4KbRryzMDL4k38gmRiFZI/34K0v/zJPdHr3pIxwn9DqX9AFXJtn0biFcNWi+nluOL4d4nmx9YCuirII4tfSDjfh6ssuqPL0Urk54LcaVZkj6Bzg9g9hwr7m2QiCwhVNWDiQc8xyPaOGVdhuczf8O3CfHfTf/o+hjpGpkzWLP4TNrzkP4dv1v1fW69Ru0L5PlOh8ZQlFo5hgzumqFcruSd/X7jPQTZvq5wunExkGOt770V+r+YpZz2dC0hb0iakBidX+O4axHVY/G0XnONZnAKeTyz+0Dk+zxMWvoUwG0hxzfeZHtVuMetjqqg09kOcO4VQ6XxhJGuqG+EyGg9X2AbLim6eoJelxu23e82m3xaCfj9BTHLh3oEvXJv6ozphL1HJ0Iqij3fghsG7Lgdu1SjjpL2YE4EloBSr239XURkAmDrLFSe+j6dstDLoDS9i74LnYrtpaoZOgjeEDtaQb1iDme3jYPQybQfCaln+BC5am252LO22FemhftmLELqMU8Tiee7/z9OMzqE37M+TTNxOLH4EIJzv+7iLaajCYldpj+66DTT0jY1RFk76VsRVcxMQZmfrXuB65m9ahSQnpIBWJFA9OkMmgCwYFnOhqm2TiTdTaqBzlGTiH5AagEcQC3Ad56d/wSLgPpKJLRcVVffjCH6W0UyRTMxH6KsnDLCOggY+Mi265Fi8DfGBBii9/G9FMkPOp3kXolRgNx56y9NfFJDHKloc9QidRwoJJIcBrdCDi/MnVbXbbwGdUXfwrbbOf9PQXr3SHktzx6lzewGoyZxhIig0ps6hPIf69Anqho5z1Ynv0tT1EFb3AcxUO8qO0Fa/UZ+tueTvTgcXH+iyGnJaG46ndb40DihMaFxVjxlIIIL2CMnE26lZNFIgKwMCVTaws2DNATQbuGUKRtNnRF1HWdrDGO12K1p+z0biDwZwGcr4FlJ1/HrEXfhrJLNmNMJIMHR/obfE+YUkU/wLpbarlyNxhgeQort+pKHNeIHcuUShLJ0PnYdzPSHhWwjTQTKxFvi7rgwbB3IsWlZd+f4pxUlEeI/uo/tTYvF/ncGZPwB8DwnU3lMgE3tmwa+c4/z9jhMzmsbij3Y8euc3WkL69QZw3Fqoe7zgp57VRJYB8khAbzmlhUhWgWvipa4cevDlKha/g+T9MQ8V9VBCQaQ9tWZwt7J1BtcMVKbWGwUTB+0ZNKcOY7kZXCOAg4ZUB/sbbqTTagJ4u6fMqsfC6081OYeWbXAfHLk4ql44mvRtMfB57IhJwyoh3TNM2c8wQDJibkACm59HOIgA0BgY2tMn665IL+u+V6uRGTNhUDYFyo4yXA5YuPk02f4IylxJqGYQZYTRWuFkwc1ojCCYpkYZFob1r4XUVANhJh2Y/G7NGsUsoGJtBYhgrkee27loYTkeRtNcF3+wlsK7YU9YoyAuzO8jFkwvkiF4FOgoZEQ96eDHEKYKqUbelXNZ7oFpAtaoV9jx4NEeuntyXLdlIS9FiOCKnDK/YLYVyZXndS9wTdk3/4f0aDjMzF7GPYjQrqqw7ZXE4t+bbIA/7NqzPhNqeIYXqb/vlsXhUsGdsFF+HPElL6UQiHcxTu+Mbv4/x7D7aty+b2wc2nUHsAE0RqpdqcE2ALQVIqdCBHJdk/ro8irImbrLWNS3CxcLAxfHCHJv7E0gAmsQsVZaLm//pVHbd2DE8apmSZHCQ5AdBCetscOKQKVbQxZx+dwHrMp0Hf2c7Qze6mHiGba7d8FzvmTaoTs2nfrRVeQGtyFWUhVgSnatN/KSlPlrmtZeR/ehOrxCoNsKHqF+ZT19x2rI9o/9bc2gpmFlD6atEffS05EUzwyxeKVe19OHCNp/Rris7kToUt4HvAtRCCaycGji3CoDmLgzWh75Te4BXj5GwCcTL6Iyf1EKSeN+A7H4eJlxT1j4CmGqEPqBuwfzkt1jALZRUgqehu8ehs6M8oBTCl1zRRO5SxvYXx/ke8AXicUdkokQYlJfgVSC/sksO46FC+NtRDiF/oJYPE8ycR8lX7eJvLzXMSyEKkKEZCw+H2ne/mlKL5yL9B/49YznOnLeNcBLhlS48cHole91lN1Mwc9se7ncktyJIdPLhhaeviNUzjCRql3FKWsBqzp/jzFOPZ+HSW9kMQdansGmU/+HXQjk7p/3DDqrVpVf6wAwb2P3H4z6rgdHjKGaN4BZMLyGumHgNMPyp3YxhOrKd3eBzyI+7Jcg1qH2sgOPpIYGvmBo9xfRhiUr0PoHpDqiDLZVTcHb9xoCNR8h17+mbFYaK/B+nOynxz0qUJWnYWUnshDpRCybHPBPxOL/NNlJx4XUErwecQMVM+pcxHJ9LVIXEEe4umBmvS6miuIPPxWXdxb41zEUG8lEG4yoVh+NIaB2zhdwFzl8hTBVJBPVWrMv67G4WMX0k6PQFBJl8FivpD5WgAs8a9tl+hEkqHoF4jIo3vjvEIu/5hzM9zKkUnURUrTzeiRDpQXJ/FiKCIo2ZMU+iCiKXcTiNxV8ww8hSsVDgoM3UqHTW6KTq5Ec77MIhUIQ
cZtcA9y1oG/P+9dmDr6XUPVzMaxTSC/iQ4VjX4cQtY1ZdptuNnVj8iuRQnBWAbTXXUp9zz7sCr4jjSJvBEkFW9g/72aydjWmlyOa7dJZq0plbfFoVKfP6LXttyvLzXqnay/ND1SvDG7s/C0MdYqbqGYxKlAlaaIgXeScTB4YQKkG7Ag0rCyeth8hytuF9IsYjc8jnb12kOq4hsG2ENqbzMAB6efwmZFfKbCj3eQHGyoeAUKx0bwuy8h0YgdZ+W4iFj8+hXOPhDwLOxAG0upRW3uIxRvK9l0ArEQSEO5m6t0Ep4JyF1GxYnoqSuE4cAuxeCmOkUykmLyH+E4kmP+jKdBjPCHgK4Rp4PjuxDPQ/NY24IFOONw/teSikKlb376edzDS11+88R3E4hOtVGaHZMKacJUjL/s/Iv0ADiNNbY4WthUDyRbSRWyMTzrRyRWIZRJEhPZRJIB4M6CimbPRq05+31TaE4987dIhwvWHEeGkE53cTIlUbgxWdPw+v6hvj4VC5c0IZ8IrWN5fOavQMYI8sujFDIZGh25KgsR2hrzrj37VMHTplnRFlmJYEV2fOa1wc2CFUE46jZeXaxLWVY0o0ADKNKheoAk3ZFDqNiSIuYfRwik7CPmhHINtW5m/8W9195HryQ0MX+ckT884bSCnkKpvhaE+lsO0i8+bQqyh60YIxalCFhc7Cp/qR23NApeMaVYkxz0H+BYlV9JM4CEKrUgGWEQXUkD3GiZviJNCFj4bhxc0ycRtjE9hUY5u5NpfclFQ1p9j+EHlaeBHSfVH5MUavUoaB/L83LyQZsbe6+KbfXSu5lcRk5m8krv+l4W/0dsySBXxRLiVUpVzFFlFXopkz4TXtf0apcXC14DqPxkmXL8MsQgGEOtliHHu6ZGmp9qdVatc282Y/cEWbjhaufuoq2wcI0jarrggVYjgCkRz3aqoDFSBGq9x6Hhxn8JgGYAQykRW88MWitRBa1d4jTzHomreh5GA5EgBnu6B/lOADoD+npvq6jWsoEmupFMncoAz7sp3CjLJSUP3YZPmS1xKgvgPSIbaTNBZ+NdjrKLKITGFV1Q47kHESriEytZTJZTfFhe5t4OMJZrzkJRcE7F+J0KocPy/k0y8vfDMT9gZqOwcOSQWs4xz/a5eBPDTTqeBbVu3DCAc85P6FTc2aN52CfzperIraivmxhSVwQvnco4XAKeQF3eYS0lrjdY6rLXG8kZZ2lorpNK6WLK7gonkolI4ZsjMmyEuPfWTirtooLNqJbsWvUC75hjOtOJ9tgHlKLtcok5WNasmEsB6sN0a6j39m3163rcGVORrlJc5p7sp6Js0qKgx1LHEc92prjBnvxJ1c0VOohQS33jRmErzna0GD3/jHez5QStHfnfzuGMJGd67EQHZhwjpDLLqdin1AilBYluPIQuGGPLO/AH4KiKgF1H5Ogvt+tCUiu72V9hvoLDYeTOTv48msiB7I/AQycR4Cmw0DKCmMJfzkbF1weG7jGaA1u07WpAahIoKdUFEP/yyFawBwoXMRY28SEVz20U47Kdej3CRItFJFfDroJe5IeqlGFQRnTVCw4J2aff9rOi+e3h/ZUUO07T6KcTibYlOFiI5/uOa/It7HmBZ930o7WLqyu99W9Va9i949jgulrIVrdZceeJ7RHNdFK2ESqt0VfyvUoV2omNR6a1xVOA3dv2SN5AdXEOq/aegqxDBGR7vLZvEbTSJETEBDNujZX0v8Bxi8VIP6Z2tVyG8WLvA+Al4wt1jhRwa17yDFU/78rhjJhNFt81LEArrIt5JLP6dUfu+HIlhFS9dIYHtvy7b52akZ0M5HkLelauR9+RHiC//i4y8Fy8nFv9BYZzfMroR0PiQWBCMH4cZi58Riz9/Gvs/buG7jKaJ1u07bOC/mcC6CpmsB2ylRrwM25Hiohrgq+clSCWpsulz7Pv8Up3Tc9n69HAVrtoTXk+vUYtSiuMNV5O1q5jf/1h7Q/r4P+MMfbZsPv+PysogC5ihXG9+RefdYTUBO7hGcar+Sqj8e2jKFILp5Yjke3BUAIyAZ7tprdCVfduNq6SntOeUejtPAkvnnkn34T+l1AfZ1RNk2kxB0s9MGQBoT+NkNbnBW9nZ+iibt/Wzs/XFSNDfAmyEaVXgZCzS3X8FjK8QSu7H75BMnEESJB4iFk9U2PvkpN/F4r8lmfgmpY5maeBTxOLfLlBh2MiC4XmIkijKqzwj3TcvR1Jdp+Lx8BAX5XQUwjPGJRB8gsFXCNPHi5m4OlmfGcLVYKuRHbk6x6yizhWSiQakc9nVwGmSiZcQi1fmdpf0z39CMoLuAP52qsoq0YkJPGdZ7lhQ4WlPmcrQLstyx+kLbxzer71mXa69Zt2N8SYOjRrikvIPjYNHWN59jza9XH9bzfqvt/Tvf7XCG1egagySjddXCiIDtCPCJIcIaMM1AmSsKkL5wSKphOliatcMqoA7BGiwo1C3vJRyioZAFNzsY2hvBbLijzD+u/MhxJWS0xWyWGYu4acJ7Rp0HWhAe+8B3s7O1k4k+yc07jQ8NzXl8UUJJCbYfjfJxA8QumyAe6lM8f1WhLBxA9Kz4TuF40u0s8nEPZQsbI0olnIW12czdfd3FxK3esMU9wdR6j8jmbisUpbdEwm+Qpg+6gr/jmfOq4yreLRH/2RDA7cU9jsL/Nt5mV0y0Qj8D9J3IYxYJLtIJl6DrMB2Fjjwi/gU8tJ6iMAYQojWJoTe2fqKK4MtnxwIzqvVkZrhml6JlI+4LcXUwEoNUYZ5rMO5Hi45+xuUdh3Q9Yt7Hn6fGsddA/DgwhcxGFmEoV2Wdd1LwEnRXr2Gvsgwe3Fz4c9DhEAOpex9858zuK7tl/XRXE/IVWbQQBtKO56nLMMIRlF1y4r+60G0tulOBnGGFJJ+qyhYNIWMgBFuqsJV20BIy/VeyPdLFdxd1Ujq51g//4i9DZdAdLLg7PQQi7+CZOKdSMVw5cK4WDyLLEgmGqeDZOJpwJsQ6+DLowTz0mnM6mXA305j/yKakJqOmQbmHxfwFcL08TPE1bGQ8Rd8wd+eUm9fFNV1j3TxJ4f7GRjIq7ptMY6d05klE1cg/tbRwtcEvo0I+yzJxFspkY49FfHV5hAFcv2k59nZ+mxPWf8TzvUGw/le0rlG3PrFKK1xlcnRQCkhRCmlkD7MjyY6eR9S2BRCKmlvL+4XzveiUXiGbYFSSruk7Rqqc2PZMgYCLQxERQasP/0zajJnUNqjZfAguxc+j7wZwdAuSns6FWw0tDIjwIvjB1t3anGJvAlQSisNGs8M0xds7otGqj8REf94G/BXpM7mcIZ2ISv98t9aM9zEp1IaqNHoWaGfGM7Qixh5kKtEgYxvKNhVh8gPLmfu3s3JVs7dwGG09wW6D+2ao3OWMHLxMZtxksCHx9n6hymOooErEcvm2TOYRSU32BMKflB5BmjdvmMh8nC+jfFfuN0ImVstJYbTW7Zt3fLHczaxZOIuxDKYimfCRYRBNbLqKvqH/5lY/J8nPHJn60ccZX/ENQIK7WHi8sflbySsswwZEfIjeyA
4iOJZhNB3hIP5frXm7O2EnH7dVr2OEw3XqICb4qrj35HAsQbHDPLQwhdz+ekfEXJK5LCusti34Dl0R2MYnsMNyf/ExQKlMNwcnmljeVkM7eIqm8FgE7sXvTDtGoGXze/b+8mVnb+/xCp0RAOhuDjadIN3um5jG/CKeBN3ll2nhdRmjF6BaiCPYduYQcgPjr3f9Ssct/e4Z2hnxM1Q41uWJWvDDIAmi5ebSpvJ4fEqvclTdFEV00k/zeZtH5zaIRcZkonHGM1CWxn/gQSoH2T82ojxfqOTwDcQa78NUS61CL/YE8KV5KedzgDbtm45jWQZTfS+XUbJvaQQwTuVVLfZYHz/8FiYhf2ziGJ4EHEffWaigwrYJ+trB0O7ZM0oq9oTLOq6B9sZHC2XPl2IHawsnJPLT/4vDenjRPK9KtZ9j5rXv1fnrCpv98KtuiuynM6qFexa9EKywTruXf4Gdi18ASdrN5Ksv45HFr+E7qiQfnrKJGdGMLWD0i4GLpaXw9CS/Wloh2iumyU9DzwE/FjhrRPGvNItUmj6Q/MUUq19W6JzhPB3CdV/ATVmsa4ABzPgVVQGAG7ONKoX7NYScxg+iPF/n9K76OYhELUww1PpbzCuMpgGpFk9/CU7WyevLE4m6kkmJmvZer5xPeKanQwrkFTpPirfNlH2pR4O5ViMxIhOFfa5C4nV/b5A7fG4h+8ymjkOMTXhq5DVl2IyttDZ4yMIPcZUFb2FuIr2EouPT528s/V6xLV0sDD+/6YCjV82vdzb80ZQVec6iaQkHNA0dFzdt+TVh9JWVQDhufkQwIbTP8tFct0B08upYFnsUqFpGTjI2dpLle1mtKnzuZwRDTpFK0MZ9ESX0xNdPnpWHkoZ++bf6q1t/60RcIYYCtQTzfeOGNvysr2OEVgNWN2RZSxT9+IpjaEd8kaQAy3PYDA0TyntYrnZoGOGXgzG59jZajsqsN3M9D4dZaCqFmq8POQGFG7uLNqzyKfGX8FrnVfRxpOq/8SjCNfPNKAh02Ny7snhRsNA6ga+W3Gr9LT4PNK4yCGZeC+x+PRaapbGehNS09ODUJlULj+fKmLxXpKJq5Eal4rsg8h7eJRYvItk4nUIrfwCSve5mB5ezERaSOV3SRWOMZGY0SXAO5CK/8c1fJfRDNG6fcePgRdM45B9wOZtW7dMpznO9JFMXImQ3a1ncoUlPYLhBeO29NzZ+kzge6CiKMMi3HCSmoVvB76sYSHpHoP+UwplFNgdNJ3N1z66t/baPUjT9rPVmbNvvOLE93/iKqtJq7GtLs/UbOB07Ub3ipPfzxrazbvKrsrYNcYDS1+ttTLUONdRrMIdNu/X9j/A/I57EHZQjUaRMyN9Dyx9lZW3olGAUL6fpsFDuMpmyK7FNUMo7bKh7RdYbpahQEOqJnt2cVv1ur9uTB15v0Zh67ycv3mdVobZSdsj7chqMULlxi8aO5LDCr+VdNf/cB4s8SnWOWjMSB53aLy6Dw08m83bbqu4Vfot/A/iBiwKxNXE4l3Tmmwy8RRkYUFhjF6E/mJMO7lpjvs9JAV1POSBS4nFRwaGk4nnI67WlyHEfRpxB78WoXSZDBopALyVWPzOyXa+mOFbCDPHRKmno6GBT59zZQAQiz9YCC6/G3k5rmasQPKQSuEvAP8yIfe7Yb0L7dWhDOH0yfYthYW/hEI41SoskrVLsZgrb0ejiLK0geq8Efy+Vkatp8TX7ymnQGehyJshko2baR48bLrKjoDqNXW+I5LvbQjm+xOZQF2Msd3hNJJWOq/82iwrgNOwBvJplOfi2GHur4tHHTM0LC8zdg2nay9j46kfE811FQYzUIWYQzjfE/Ew/mIg2PyaxtQRtDKVA5g6T6fZ0NGi+yT3XoTLeO+PSz69g/zQ31a49zNHtKWPVPu43BxTWNop1IS7dQG/m2B7A6XaDo1kXNUUjpsO1hXGKLrEGpDfcrZJF1dNsl0BryaZOIm4dPciz1Ytwsf194jb9PnA+5FV//OZXE4WM9P+DHhcKwQ/hjBzTNy+ayw+1rp9x52t23e8bPJdZ4lYPE8s/mmEg+U3lLpZ9SHBsAHg58DfTdoIJFC1mhLBGxij3g0zCOEGD8NCm0Fy9avdk8HlbYiwUIDK2jUL03adY+BgejlcI8C++beyZ+Hz+OOy15O3oqQCDUj/YbNWo6KGdk9df+x/ng98rcKsXEo56cPmRpfVBFYYFa5HR5s5G12NY4YKRVglQdiYShLNdeNh4mFge1J0ZuksppfDwHtmZ9WKfscIogrxiPboKmdf9PJQojrejWRsWYxvgVmg/wdJVRwBPc7fMNToxbvShJs0jWs1oTqXCaAq/I2BdiyUMea0iHB+P5u3TRS3uA15hoLI7/sHZibE7y1ML4pYWUeRTLTZYjJ6CROh5/43pAvdlxDB/zaEUuNriBJYisQkdjI1t10PouDO/YLvHMN3Gc0Qrdt3XIoEYqfaKzaNuGg0cGt5tlGik3cCf428lH8DfCveNEftMaWhSQvQTyyeLny2ptyQ/dBvfkzvsa04aYVhSdGWLbVi2s1D9xHw8mgUnbXrdjnVi15wILT2V4zM+HBtJ3VoUe8jay0vR1vNOgZDYwle5/ft0Yt7H/ICbnpvwE1/BPg3DU19oQXe7oUvCLhmUFFI26xPHWNF510oPI42Xi99DrSmxWmnzu1hUFVxJrAQrcaueZoHDrC6/XdoDEBjeRmMkbfbGYwu68rWLGlxchnVYc/zBiMLh1alD2Saex4Z0Jk+20V1m3h1oJpAhwrzKhfCKaTI6i3lX3sordBKA3kVIqBHeUmCDVqhNdmewsQNTbA6D9hk+2Zf12ZXnSrQZ7uUCue6kefvK2zeNvFzJ/TWL0IszO8V6gimj2TiGciKuhv46IhCNNmukNV7ENhHLD6hMiSZeAkS4J3NPdJl/46rUysco5FA81NmRC9+EcFXCLNA6/YdRxnLwohhmrq6rt7TnmcM9vdjmoaTz+WKjJE28KFtW7d8BSDRyVXIystATNdi8PnyeNOcrJpmhbYTj76oMd/xHUvngygDKSuQF0cPnlWk2nGUjUahlcE9sTe/USvzvYhJXnypBpEXf7ICoiGgs3ro5HVXnfrhUQr1FBoYCDb/7sGlr34qYAWcFFcf+5a4nRSA4sElLycdqEdpj9XZAzTnO8kaAR4LrWPQHEmkanh5Np76EVXZTgztkjOCBAtWgpL2FoY2w7jN6zQodcJe3LPAaVOhngMRMj3DS/isEXEHQy1e/dBxyxjV8UwBNKz6AtnBN5IfUtoOO16w1m7zQoHm/gOGQqO0RmmnYqOfCSTRzPmNSjTSGolpfRtlfpmWDR9FqWsRmuf3jyHBmwjCb/R2JIvui8TivTOcW3G8eUhg+x0INYaLpCu/fNxFjNBz/wQh0RuP02oqKMalRvddKN3voW7pm2FYULMQrGF3pAdsIRa/a4bnvijgxxBmiNbtO4KI2yJDkRYZMEyT1ZddmQuEgoaSgChaa8t1nHlHH9s7GAiFjJZFSzYeOPTIC9Z0JN5/kzIv7YiuCO2f98yQZw
wbG81AMtHJA8A/xpv42Tm6hlrk5Tu8beuW+0ZvT3RiEV73j4HgimyVO5ADtKG9/1PK61ubOXiD0qwBo9bDQASch+HmP+Fa5ieQ6tNiIyBVuKbx8FvgfsBaffb2Oxb17zlB2bOpgEiuZ0Xxu1C+H9B4BfeV8lyaBw+SCjQRtCya851oFCEvy7rMo9wXvXbEyTzD5mDL07j8xP/iYowIcutiGqcdxlOmNrRLs9vphXUmQG7QLF8+BbyMqdBGpcZIGlDKfD3V84IaOvNYDQpsL2eg8PCwUEpj62lTWs1UGUgvh9IY62i6pB8r+EMkgwxEADcxOZ20IJkwEMVSbEP31yQTsWkHmUvjLUR88PMoWS8m0nzpGUjfjPL9P4W4gMoVwGzc4J1IPKPcTVS639nBUuc8Jws9R6FprS703jaAlyKpqI9b+DGEmSMHHFTKyNbUN2aq6+pBKRatWO0Ew+GgUoatlDIKf8qybWPVZZfXLF291g5Hq97ePHjoRx7qSqVdp2HoWHBxz4Ojxw8g/EJfS3Syfq4n37p9Rwwxc78N3Nu6fUclUrMGYFHOCKa67aZ0t90U7Ay0vFxjvl2jrtHB2loDsHQe08thaofNR/97/k0H/20hWrdTUgZRykjeIrluFvTtpj51rJCZxDxgte2m5y3s3/NTKixUXGU/WhiPVKAB1whgenkML4ep8yzpeZBLzv6GpWd/h/JyaKVwMQh4ueI5RiCa60YrE21Yo1foKmfXal2zBFM7hoFWWRU4nlFB27PDI/zJGkVvaKE2cMcIaa0sjRXUiHBpVmi702oia5SXiuiy/446vuxv1ojOyzDWF26g1N8jfaDLpzF++vFYPA1RBsWpVlGpr8bU8QLESh6dzlukoS4hmbgJ6eU8HqnhTBBB3omjFccoEhwqQ5hwvTx4Tvlv/8gMz3vRwFcIM8S2rVu0YZrfWbPpyupla9eHYpdcymXXPYX6ppaxVUxKlf0ZQSAQyvcrxwhGHCNUrbT2ImX582UovsSXnYNL+GRtY3Pk0utuVBuvf6qKrb/sbd+498BoIrku5OVoQVb4QSDS5HSGlJPG7D2C5OiIQHWVjatMPGW+f3Fq/5LmfLsaTVldnWnj8hM/INZ5N+vbfsmSngdA4g3PDOd6XwSMWIVrwEWl71/6qnyRito1g+xa9ELaq1fTE1mCVgYeJlprgrlezPa92D0HMbRLl9UISo18ubUmnOvB8rLYXtGHr3Cw6A4v5ZElLx1QuQHMzkcxOh8lMnRm1a7wZfm+ho1Zzxg2BjlTvS6D5sRobaCVhdey3kGpqsIl5BxlsT+4hraa9WTsGgw8FJpBu2muCe8KwllBsFYTaXYZ6ix2sxuJdL+B1t6obdNJlqgUP5habKoyikHhSi6rX4z6fFOFfYqK6fdMXynkkRjEM5BV/tj7ZReMFl1IslJmL4blFM71fWLxr03znBcdfIUwQ7Ru33H1wmUrPxYMRwylJN1Sqam/2l3RWMHN4ga1MszOqpUT7f7wbOdbxMmTB689cfLQ59cualy3dPUlyjBMUIrq2noWLl/5D+X7xpuG+ehdypq/pI0wKt0jqabKQKMw8NBaa1AYYC3PHDbXZA5wxdBDI/oYzO/bh6FdrZWhNbCo7xGNxFUiWatqeBVbWiErOiIrg9cd++bzr0/+t6qX7makA/UcaHkGjhHC1HkC3hC2FvnkATo7SHdO6/2hsWwG8/v3sbj3Ybyyxz9vhFCGgWdYNGbPhIy+Y1Ix7OZ0uPdgte2k3KPm/AEPrT1lOhojPT91YNfKnp17GCV8MtFFOTPdbZIfUkBKw6CtHQylccwQexY8n+N1V9JTt56qfCdzgGIqKJQHQ0O1imyfgfaKCQ0jMXiqjvZ9kO7L0XdCMdg+iJt/8TTOexdwX9k5O5DuaTPFDxCX0Wiu8a8Siw+O+u5OKgv9/0FSRW9HrPipwkZotq8EPlZx7EAUapcII26geghlXI1SESBMLP7KaZzrooWvEGaOtbWNTeM2dplMORxufipHGq6nrWYdj857Jh3RldhuhsuGdnHd4D2szhzwlPYeQvh1KnWMmjbSR+/5fwvzp+5dlD/55zc3dK+f74ys9DdNc02Fw4o8R8PPyonAEoasaorZqK4SOW7rjLJ0TinDxLWr8VAEvCwNTqlboWOG0EopNEpprR0jUHQrWVm7OpxsurG44sJVAQ433khj5oThKhPTc7ik7bbhlpyNqSTNqSN4o7whBh5Ku2QwVaUso/qh46A1rhnEUYECO6vGUTbHGq7DcDP5Yk1F4T/YTsqqTZ+qVuJ+GjDw+iwvdwmwqyANh3VYOHXCVgNnFN2HofdYUOXT4QGz2rHcTGZRz0NcceK7LO2+j+buqRXnaiBHMD9SQhWfL6ULP40x5qjsAKC8wu2sTCOuHYO+owHS3TB4poqOfa1TmhRQ6GtxPeLqeR2wtILgnjokY+mFwEbEjfg0pJHUmyvsmwD+Akn1HAS+giQt/BYJcr8HydibKnKIRf6lwjiVLYxQLTSsgPrlJs2X7AR+jFQ7PyHgB5VnjvtUBak/VStBK5PTdZuGP9vuEJcO3E9UORim2bMwf5pF+dPfJRa/fYJhpo5kYlEQPlKcnYnH8twxztrDrMiaQv/kRCc1wN8v6nn42Td237vENazIkcYbaK9ZBwiH0MONT+ey7ABVuS5MzxGXjVIowI00owxzOIBQVBgAJ+uvoH7ouI7kupVrBNSBlmeMcA8cr7/aOl5/dQatQyhVdCmBEseUqfMY2sFVAYKOyB7HDGG6WcyyTooKTXdkWbGAaoTGGAw20TB0TGoMFPSEl3K08XrSdh2OGSJrRaNL1Z0YZe0/Fw8dus/EW27iLlXaK6aZtt2z7A0/33jqxy8OO30rgbQygxG8vDHciznTa5Htz7U3Lf71lWe/d6vtpoe7tU0VHgYWORtloRWeGuG31mrcOHNuADzXpEJfhgoo1rY9k52tQTZvm1o6qSiFn05p36mPV6xtaJ9k388Bnxv+nEz8B1KMaQB/haz47wGuY/JgvEGpXqUdSRueqHd6MankKYhVMp1C1YsWvkKYIbZt3XLgt2fyRxDStmForaflOgKoynZw2entBHUWNHjViyxDYWJaN8zhlJer0TVQ4Cml7kOCx5+PN/GNwqb/COd6Xra8+x5bozA8h9UdOxgILSAdqAPEj//I4pcSyvfrK49/V3mFlFTTy5HSNtGCB6PLaqTHrB9O3cubYR5c8nIVdFLkzbD2DKsHoQsov2lGYWFOxqrC9HJYZPEw6IyuxC3wHHVHlrKs+15MLydLdC1xDEO7OGaQvvAihwod2U7WX0nQSdEwdIyhQENuf8vTrbwVlaIEIG9F1UCgkZrMWcRppanr2/80Qg0QaYRMbxjDCnbUrvtiJlD3kz/G3mgo7fZH3dTnrz79ww+QzRlltzqvtJdf03FHDtQA6DAQmGJlMS4mOTOKTV76UxeLJ9Bl92ucLFFveopn+JSziwNcGCQTNvBKZKXvIgHuW4AbEE6iHKJoxmu4ZCGB6wFEgYxr/Y86JseoRk+PZ/gKYRawb
Ps2rfWfjf5+ukphafd9mDqPh4mh8xj9x6q1MrXS3s3sbH0Rm7f9aA6muxdZ+SwqftFtNnwu3sT7yndKdGIDLwk6g7aIwqInwiPoDA4rBACtDNKBOjUQnkdN+gxmwZUTsWBfaB1DZpSMCo01m5RB1q4GUQJNlPK/i5CXUXus6LwLT1mgJbuvqyo2vFMmUMcji1+imwcOKsewdXWmXTUNJcmbER6bdwsoI0CFvH2tTA61xMvPpREheD/CAVXjGbZylYlVXM1rDzI9ohCa1wEYtUq9E61dlEppZQZMvDdQNX8vTuZy3ByU3EgBUca6uAqlePEj5jXqc1vVWiLuINW5TlAmWpkoEfJzHIceMYUPsHnb5HUIycRWpGn9IeCvy9prXig4CCdSHRLsdpAuhZpkohtJgz6CdGabCNXAJqbuTrcp6+vxeIevEGaHLyBl73IftcbUeVxlF13PGF6ekDNA1qoaXtmOhkKD1jiGhU1OuJUNW+F5UW2Yn7m7kyySjmcCd8ebODLtmcbivSqZeJqGD7gYi7Iq+NmD4TVNBzv5NPD9eBPFRuwvA6zBYAuuGcR2Jb6XM8MMBCuXEjw67xZ9ddt2bTiDhhdqcFWw1lyUP81ue2PF/Sug4mLZ1A62l5X7aSgMnSfojOzyOBRoUMcar4NiiZp2RYkpMRkW9T2iWgb2k7WqOdR8EzlrJBGm0h5Ng4dU0BnMdkVXXJYO1AUBdazhWjac/hnofGHgAo9efpj23jO1aym0o1Eq6GXsS9N7FmCH62ha20//GZt0Z4RSCuVzKbHeToqMWUVd5jQhZ0DSHHUeTBvqV6TpOTRuW9EpoHJhmxnahZu5lc3bJi+GlO573yj7Zguyqr5wEMH/RuCbyEr/N8BXSSbqEMughpGVyDD+b3EnEhMZzUIw+t4dQphhZxNIv6jgVyrPEj9P9v+5adv/Wuv1q03tvzIC7hADgWb2LnweAXeIy05vx3LTuEaA3Qu3eqlg85iVR3XmLJee3o5CY7pZUAaOCmBoh1SwkYeXvLz4Iw0iGRjPiTdRmZ10ikh08t/ICq849qviTXw/0ck2ZDUVCuX7WdC3G4DTtZeRtWsqjgU4G4cesard/mIxlxowa9gV2TTe/qMxbvXtpae2U5s5jdIenjLZtehF4/VQHoOmwUOsPXs7RbdPKtDIQ0tGkmGubbuNlsEDKO2hlclDi1+mB0MtGlDBfL93/anvteNkFsjkFESb0VXz0JBylH3H3VU3NgCXNeU7Btdn9oaMYipm9+H55AYpu64JhdBU30I1VRo7AAyHUK2HmwMr7BCI/D/6jn+YUkV88dRZlDGf69/VN6VhpcfxtYy8psBFYCUUK6fDxOIDhc9HkArmcriU/Gyjhf4pJLPuMsR6LbqDUsAHkBTsDQhtx//N9fQvNHyFMAdo3b7j+a9bePpH1W6v6WFh4HCm5jJCTh8NqWO4ytKmdhgItaQfWfzSigE+Oz/ghNPtKNOyLmm/A8vL4imTvQueS394YXE3F3kwvx1vouSqEp76GNA21SyPRCc5SjQGBvBovIn1iU5uQloMTsstUev0siFT6nu+N7SBPqtuvN0rKYCKSsHw8izs3UXAHdId1av1QGj+VDlmWN65k8W9D+EaAZR2XQPPvGvF24f5jQwvz41HvjTcUAdgMNDEA8teU1zJK8PNc0lnwm0YOnbGctM7HWVfmberYu31G/NHqy7t08p4BfDIjQN3Nls4d3v5dFQNng2qfErhOeUTHVfpTecNnMqF67LzleJGagj0cxAB9xmkWFAjC4xfAS+dlMeoiGTiZ8BzKF2TAwQLAeGLB8KHVKDhHYEupBPeuwt/L0AUQ7awf11hPw/JekoDx8fQZj8B4buM5gDbtm75aeaPX8lqjAhKoTwIOoPYbjqlIaKU0milDO2O21AlZ1WpXHWVGfIy7Fn8InCyZOwaXGuEd6AY+CwJ/WRiEVKn0AR4JBMfJBb/5BSm7VL6/bXSrpvoUG9FGfOQVL7a0QeE8v00pI6SN8N0Vq0cQRzXZ9XxQOQqIt4QQ0akUJE7LirJtYqyzjNsTjZcVdyeR9hap9RUfSA0D60MDM8Bpc3BYPOIOVdKSRVajJL/2DNtb9+8Z5qRbFf0muPffAbo+kC2Wy1u2xHUdQNNR5s2fzzebNxI01N6jh/de9vinh2vQnt4EjAov6by+z0aw8rCxcQslXxU3BGG01zLV/mjqu8KFBzDzksdRQT/F5Dq4mcDn0CE36UIkdxUBd5bgT1IMoALvOeiUwZQdCPlGRkg7gOWEIuLL1TcTK9HOMk6gH8t29cA3kUs/qzzMt+LAH4dwhyhJ7L0foXG0Hm0UpytuSTfF170r+L/9ZRWipN1V/QClc1qrY2V2cNcM3Q/l2d2s4huHGOM/iiuji9LdPKdRCdLkKbxjZT80/9IMjGub6cMHwY02lOrz97h3XjkyxuuP/rVL9Wnjn2oMM4IZ30o38flJ75PrGsna9rvYHX72Dha1gjRYzWMVgZzKSgsJlEGtelTXNL2a1a1JxgItnC46amkgw3oYB2h6hZimSMUqSy0MmmrXjdifqbO0zxwIIcEKLWhXcK5Hn3pmZ/VAfWGzimjUJ29rPd+44oT3x+uIh/Q9moHUztG0HOMII6yHUcF/nsg0PT5IaP6SDHCXIbiR6/4wZviK6kxwI4UU0MVhu3qSNNkqaIBRJhHgJuRjJseJOf/U1M6MUAs3oa4TlYA1cTiX5jysecff06pqLIfWD+sDKBIFf9fxOIfBk5UOH52TXseZ/AthDnC/nnP3N2VOnxTONer+8ILdX94odUdXf7wUKDhwwFn6GVDgfq93dHlh4B3Iilx5b5LL+INOQvybQG3IBCanS7O2v30GXWjT1UNFFcsz/XgQJkIKbp/apiEmz3exCcTnfxkTfsdfzpv4LG3OyoQMd28uuTsb6L3xN7kamV+FukY1QyYTYOHJRNKWZhenvn9jzFoN3G6/vLhNegoFKubi/16p4RgfgDQZK3qSuNOKC0j2S42nP4ZhnbRSlGbPs2DS17OUnMQQ+cAxcL8aT1oVnkddosBqIMtT8vWDZ0Ihp0+pTHRytBLex5QnVWrvrs6e/BPW/Ltyug+CPkhjwozqsmerWZn67XM33Rfs67KmbhKey5ag4ljuob9xpAzYAxZtYRz4tYuW9pLmQYYxe/ssozP0cGHETBtRd2KQbzsINqLYgZSqvNg/WT3l1JdRjMjFdJE5INjIYyoyWkdcyEQi/8XycR3kOs7MQmT688Rd1Jj4bODKJQnDXyFMFdQ6pedVauKD49C/JFqfWztx5BSeBKdhJCqxmchro8mJEf6TJjsQ8AwbYBIdk+jtVrS8wAtgwdI27Ucat6iclZV8aGuOmMtvHuRc/oKSvJjDxIYmxTxJg5wcO8xR1mmFH5JHYHp5U3HNF+CuKZsIOcagTqltWV6ObtYABbrvgdlKE7VXV5p+C5EedlMhZJYa2Jdd7OobxcAZ6vWcrDlaeMpm4qozQgNj2sEQGvC+V5WdfyeUOYYbrAWHZ2HgaeW5E58NeqlnhvQ2Xnd
ZmN+MDQvGEylcZWFiaPyRtBqzrW9viV9IqdNWysnGyjEcrspCQtg2HVzCfCZZjW4mdolMNim8h6D2tMhjWGiNdFc95j5IlaYBqoqhYrHDTqA0GrkBhoI1/UAvyE7sAXtTWZepJHK2kPAF5FuesW0289Pcux5Rev2HU1Ij4ZFSObQL7Zt3TJxT4TxEIsPMZVGPrF4nmQiVjhvGPgUsfjJGZ3zcQo/qDyHSHTybSQI5QK7gWfGm0a6Xsr23YysSDzAUNpznjr4h4c03KAxgikjajwS2TTQPHCwblVHonCUZjDYwiOLX1J0D2nglvhAIgC8Ccmz/sh4vWkTnaxFAmjdwDfiTWTZ2brWUfYDQFQrg77QQvYsfH5REBfdW67yHLW+7ZdWUyppgBZ3ltYMhOd7uxa9yABxKy3pfgADl9O1GwdNLx+tHzqmMnaN21az3tTKrDQtQFb3V5z8QcFlojBw2b3wBfSHp84KUDt0kkvP/AyNQmkPhYfGwMRFAV7VQrxoC/1mdb7GHbCkUY3iOHUsP5vA0C4uFmer19IyuH+4BkFheOAZiAC3dcEnrYo/ChyjYdUyAtGSiz/d+6Dbf2qToZ2Ki67Csb2I4Bnu+zDevqO3KYBQnaZu2fuA3+Hk/kDn/io9fm+FLPBM4K7hOoOdrTcgbSd3sXnbjnFOf36QTNQjcatjrbsVSGLDJoRaPoi8Jw5wHPgHpOjsuTW2zl0/j9+uq+cXCMHc46+o7iKCrxDmEIlOFLLqqgJ2xpvG9z8mOnklEsAqCt1wyEtvuT5177L9wTUfOmvP2+ApM7Wi4/cLFvbtKax6PQw8766Vf1qUET8BXjLcXU16KX8BMY+/DHyyGOxLdLIKYYGMIAorAbw03oRu3/3zp6UCjbflzbDVVrNhuM/AGGjN+tM/pz59XIqk0Jyp2cCR5qdq08upq45/h4AzVJBg2tFKWUprtDJor1rDwXlPTyMv+JiFb3WmjY2nfjzMS6TweHT+s+mJLpvq7QdgYe8jLO59GMcIEnBSGDrvaWUalpcjF2pk//xbvPWZfYbyXE+7OWUoQ7l9JzHzEqc39Vh5UjZZBwlse4jlU4q1GHYdzet0gVlVob0fe2f3PFOhhwsfPBSGBDCUKrnThk8xkUKoCCvs0LSmCqnQ/TyZvgi9x5QujZRT4h5KAq9k87ZZpSqfMyQT3wReU/g08LXHeFdfXv0XckvG9WJU2ZqXrwDbQNsmA6biV8ArL8oA9+MEvstoDlEQzGMazYyDnZRK7D3gaMYI7ycW33Wmk9dQqKgcCM1P6/59IcvNYEhsrGNp1x+vP954bTreRImdTvKv/xdxaThIzvQ+GG6ucysijC0k5XAr8I5EJ/tY8NzfIf1k3zrhjJXi4PxnsPrs7VRnO+gLL6RYFBbO9WK74nZBKSwvbSoNrhHUoFVT6jAHefroZgDDsm4w2MxAsIXqbAcAQ4E6+sKLRs9gUpyu20RH1SoW9O2lZeAxHcpntGcoPGVyOrKSXqveyHgG1R27DApcReboyZRf8siPFqVUXQ9xwaQBhZfP4Tk2hmXQfypLuutGAywNqYK/LGIKb3IP4iosap5Q8WZMhpH7KJR2dxCLZ0km9uG5ikyvwjBRnlvYXQ8C/8TmbVPJOqsMea5ehrg2f0Es/uiMx6o8fjMlZYDWVN+8mC//UKITE/YzjlWDZYCjUa5DTcDkxd87zIe7du/47LatWx73/Y0vBHyFcIEQb+JYopNnAW9GVpn/Fm8a5pf/OHAjEO6oWt2/suP3d9lkby5sa1nRvfOOFWuvXTFqyBrEMij6pUNIKmER7YXviilACiEG8xCu+edUmOYIOWl4DrYzxP55z8Q1R2ZAZa2qUoqn1hRbShpeWnnKImU3jJa5Y+gk9izcSsPQUZTWdEWXj2+pTADDy7Pp1I8IOgMorZVC65wZoTu6nJP1VwKQSqfy1V5uREFS+WQ0SqrHx4dChFUEWeWHAY+OR/dhR35LPvX6wj5KQbRMqpkIIRqUfocJQwXjBZaHrFr3RMPVny1UTd1P/ymXTF/5njmE7fNLE13IFPBvlBPGJRM3E4vvnuWY5RiuMiw6K0LmlHiEyJRFFAwFjofVl+XvgPfcfmfiA5saGWgK8btCVpSPKcBXCBcQ8SZ2Iy0Ax3yf6GQjsBaljoTc1I8Lm4oO4uXsbDVGcc70ALsQy8JAVqC/T3Rix5vIAz9ESuzL0zbNwt8LqMyQlqPg3w7m+9l46scE3CE8ZbJvwXNGrODzVoR9829lZecfCOX6RsQC0Jr9856ZYXxiMbk4w6KzatVEu5SgPeYN7CeU76M3smR4LtFcF0EnhYeFqXMDnmFFjzQ/he7ocLGqDhRYUst987LkN4vxhrxSpo0R0LjpiaLaxbqQLCgHWEV+KI3c00pdymCsQTIjXiLLyxjt1WvWXyJxqCqcTKQgUovj2cCtbN72xeGDdrauBf4WeTY+wuZtEzeEH0sYF0WelblUCPuQRUy0+MXDU2wRcbhPrIQVNdII+45T4GjF6lpdd8M8vqRBux6dZjIRJxbfN4dzfsLCr0O4SBFvoifexD3xJtoRH3A5TUB2DAGZ+E1fAPxQQ2e/Ua3vj1x1D5BJdHIM2Iy8yDkqC6HRz8KItPml3fcTdFNCwOc5rOz4/ZgBeiNLeGDpq+mqWolWJo4ZwiHAULCRVLAp3DxwkGuPfo1rj36N5oHZFX3Guu5mVccOlvQ8yKWnfyrtOIGcKW2cFR6eYVUDRs6Mlh+qU1ZduniBRXgYoAxcrLyqXthO8yWa5jXQsgFqFhW7ZVUyG4Iow8YwoygVIBDdQMkVONGCa8T9HfGDNI5t6jMatpdVLQMHivn0gwSi7aOGySPFVrCz1WZn6/eAxxD3zBuAffzxP5omOY2DFHIFkOfDRbLH5g7y3C4G/tCbw73jJOztnZqO9FDcdlLx1f3wX4/BoX457vqCzeFplKtpQFK9fUwBvkJ4fOBtQNF3mwZeNc5+HvBMD2NJ2Btquiy92wp4WQOxCnYg7QEn8oWU+12Fn6YAU+dlAVroDje6NWY5jtdfhWMGhWjOMEjZdVx57Fusa/s1tpvBdjOsaf8doVzvZNc9LuYNHECjCrQUmuZBUTBZu4aDzXG0kk5wRxuuH8F9VDd0YnBJ/yMjNMSQVeMdbdisz9RsYM+iF1j56LxaxF2lMCyINEHDKqheXFlSaQykW5ymal6WaMs/Q+XssgJU2d+ILwnW6mKrUBm6bNvI3y6/uiPxfUCEaqTpJuzoscI+TuH83yaZWE20+dfAS0adKkqk6XaSifFLykVYv6kwVhj4HfDVCa5rZojFe4nFb/r6AfXlPb2jC64nR9ZVuGVs4KNM3SJ9hY8pwHcZPR6wedsQk9P2AqzXEHGUZQMoNFXeIN1S8awQv3U74k4Y7c7QjG5kXrZgOFW3iYbUseFuZcfrrhp3EkPBRu5f+movmutWDYNJtbB/TyEN1MXQCscQZRFyBsiU0WlPB1krSiTXjdYKrZQUshXQXnMJ7dWFVfaoOoYlPQ+EGXXtYWfAGAo26JNVV3uAOpl1zFj
u6MgTKgXRRghWQc8RCvTWLqKgq4i2eIRqFFYojJvbx6h6hSnAJTrPwLCl09pYaCX8OysBFHovuqxRzupbDiGuxOchPQB203Lpr4EHQc0ffc0AWKFliFX5vUnmth15bj45XkrzHOHdSMD9ZbMZ5K42eNZiMBXkPNLJAf5ncpvLB/gK4YmGJGAIWZvIirQRJur0U+/2klVB1WG3BFCqH+mPcB1gKe2qgJMiZ0bQ4wRyB0LzeXDJy6nJnmXIrmMwNG/CiThm2OgLL9JLuh8ArXGVPdztzPQUeTPM4Dh02lPB/pabWd/2S0LOAL2hRcMB42GMLWjTAJ4yi+mewwJSoVl39tfqrqo/A1DHA0uD83OnzobIzx8zihWE6oXQdwLAQbtpFGGCVQZWCMDFjjyVcbvWjIssqbMTxViylPWyQNpMfhF40Yi9Nm/7GcXMsmQiDkQJ1gww1GWj3ZJHIFCdIVgtpHRFJBNPQYLQ9YV/f4tkrtmI8ttIWfHkXGPb1i1O6/Yd1zBJoH0yHB1QfOugpiZAuiuDk/PU5391YseN27ZuufBsrBc5zrtCaN2+Yx6yUj2+besWP194LhGLn3aSd74uawT/T0PwaDCG5eW5LL27kDWjqHH7wofDa76CFNDZ4WwPl5/+EbabVjkzzK5FL2Qo0FBx+EygbroretUXXkht5jQGLo4KkLWq6Yks4VTdJhyz4K3QGtPLSa2FUkSyXdhehoHgvHEzjYaCjdy/7LXilpqg4K18LoA+1nDd0Zp028qANzTiINPLo7Srlnffx6Khg4Zhh+cTaaxcKW1H5HvPC6KMFgwbrNAQ8j4FMKzNTKIQRrmCYPI2l0U/fjH2oIDLh7cKs+c/ANcAvyUW/xRSnasIRA3qY4Nk+mw85yDR5qXYYRc4iaz+IZkIItz+UcTl9G4kS81CKtYN4BaSia8jlc6fHaaYPjeYlVJIOYqUgwaGgHVI2uzEQXQf568wrXX7DgXcAcSRH/ss0sTi77dt3VLRRj4nkBfn/Qjj49eIxXeet3OfIyQ6eRrSZDyCFKb9EAkgGysyh1iYPy31AZ4LWuudtTcNUugXe9Wp/6Vq6AwoA/AYCC3gwcUvndqJtUfz4GFsd4juyLKKykJplyXdD1CfPs5gsIVk42Y8o5TxGc12suHMzwm4QwwEm+kNLmBx/25AkbZreGTRi8ekuM4CLmCYblav6LxTLezfMyxwUnaD7qxaqZb2PoQq8gRWzUNFx7FinIxmqEuhDE2kSWGWZbG6eZeOfRlKQn5kiu2ooVQZtUelt7EsfjA6C/WrbN72FpKJRkTYRcq2/zux+LtJJl4E/GPh2L9BuP6filQF70FW/CbwS6QxTJFJtw6ZV4givXUJQ8DtxOJbK0x3VmjdvuPFwHeo0MLSsm3sQJBMegjtjdS3hmliGAZOfkRhYR6JfwwBq7dt3TKEjwlxPhXCWxChVY7iyX8OvHPb1i2T842UI5mYB2QT1fFrESK2R4F/jjdNsDpLJn6PvBDFldZziMV/Pa3zXkRIdFKHKNfyF+gxhJ5i8+LscbUsd7xQJeuRVUHur7p2eMenHPuaVOkKKyt5u4q7l71x9GkqLGhhzdnbaR48BICnDB5e/FLSgVH8alqzsG8X8/v3kbOqONR8Exm7dnjzFSe+TzTXhYuFqR0M7eAYQXQhZfVI01M4U3vpjO/PuNCa5V136ZaBQ7hW2DvbdI03v32nHc33yOpfe2BHUA0rKx3tIVTJLZTuTdEdo9Ear+vAfpxMTBUEaYHzaLg5wegBp7AULieiK3ISvYXN23IkE6eQFXA5uonFK8cxpNjsKjznZ2ivAcPWKHUSsRg2UKpXmWyVvo1Y/F8n2D4jtG7fsRIplHxKcQ419Y0sWSWRgHwuq4/s25UvCP9AQ8t8FixfgULR193JiUP7i0P1A0eBd2zbuuXOuZ7nExHnRSG0bt9hI4Rr4zmNc4gZ+sJtW7ccnHRAaQjzJeDlHso8FFwZPh1YXNx6e7yJZxZyqD8BbM26nPnxURLdGfb/6Xq+NcoLsJtYfMq9Hi82JDp5BmJpVXxxDc9x12f2mXVuH3llsy+0jgGrdvhFv7rrt0R7HqOYld/VsJE9DTcVD9fAKdPLBQzP2ZW3Ik+nIPiU53Bj8j9xkWweQzscr7+GEw1Xjzh/QyrJujbRt0prMnYN9y999bAr5tqjX8Nys2hlYno5DO2OUgg3cqZ2mGF6cpK8KaIudZyaXAf1KkOVWXgHeo9hZbuH7wXhBqhZJJW/+ZQoTTsKSmWRZ+sjxZsEw81otAa1J3DJqabePU3z+veFCu46T7iTKv9OU1QIDvBfbN5W3hwpgjSGH31fjhCLj9RmO1sbQb2e6gWvxXUuZahdFhGGBY1rXUzrlQhR38uRHgmTTcsBVhKLz7krpnX7jkaEm6sa4JIrr1OmaWplGJ5hGIbWmmx6yOntbM+1LFoa9bT20J4yTEtlUoP0dJzttgKBpldesdx3S08D5yuG8HkmptcNIH6+P7Zu3/G5bVu3/H35xkQny4FWJK/6h0/FeMDAe6GHcj3MaCx7VHVazV5OsmmenuhExeHtwJtdeRxW3TiPG36crEgJvWR2l3bB8TAiKCs60j3DMneHL8PULi4GyhB5Xty+r24zlxkGgVwfQ8EG9tdcX3547tqjX//3SL7nAxqu7YyueHTvguetRymllTHoKitqaEd72lRICmgO+S2Hx6/KdgIaT9mAJuT0Y2in8BnaajawpOcBlM6jlcnZqpU0pZIoFBm7ho6q8mLruemtsLDnYVZ13TVcjezWxXCDdRiRRsgPgOeAFQInA91JtJtFeQ5ae6BMCNcF3EjLUcuyX9Vj1n2u16ydF3KG9HyvAwUMqOgfugLzNqxL/SSolBToFTKCxhOwjhYGhoBCjxDsSgq3epHeA3cCHxp1bAZx85RniGlGZ+rsbI0BB0BbDIxqm+w50H/SoH55CFmYPYq0kJwMJpIVNOcKYdvWLV2t23esAr4N3GhaZsgwTKWUBIyUUoQiUXv+0pgNYJTiK0Sqa3SkusZWSr2Wkb2ffUyCc6oQWrfvuBr4LGL6TQU54L2t23d8edvWLWdgmKFzN6X+AZuSwdjOWDYZcTDDACYeJm75i/S7tAr1hHXGdD0M14P6IIYrLx5lbaQAJrdILmLEm+hKCPfR/zDSzzsMpRSesipKoyEzyr11T8PExcXU8/v36iW9DxmOEeBYw7WBgJv6R6BbQa45dWTBvIFHf322Zv31KMPdP+9m65KzvwmZnqN7w4uctpr1Zijfh+nlSQUaQBn0h+YDCsPLg4KU3YCnSo/d8fqrSQUaiOR76QstpD+8gJPZDmw3Q39o/oh4AxNw2zQOHqExdYSMXcvJuismpL2I9dyLGq6jUJjpTlw7itWXRHuupNbmhwpuNPEs6qLVoB0Y6lJZbX/mzsUv/yrSXEYTRO0Xyo6rPWVejtZfMrQrt1xSdVXRLKug1XTaqrHCTv8Y60fDOgU5lPmfXP/OdwGws/VdSN9rC/gm8zc9F/gpohROIyyh60kmzpLP1JPuPovEDMa/KW
[... base64-encoded PNG image data elided: matplotlib display_data output of the t-SNE plotting cell below (scatter of tsne_pos_highfeat, figure saved to high.pdf) ...]", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "from matplotlib.pyplot import figure\n", + "\n", + "figure(figsize=(6, 5), dpi=80)\n", + "\n", + "tsne_pos_highfeat = np.load('tsne_pos_highfeat.npy')\n", + "\n", + "plt.scatter(tsne_pos_highfeat[:,0], tsne_pos_highfeat[:,1], c=color_list, s=10, alpha=0.9)\n", + "plt.axis('off')\n", + "plt.savefig('./high.pdf')" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYQAAAE+CAYAAACN7GfAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAxOAAAMTgF/d4wjAAEAAElEQVR4nOy9d3wcx32w/8zs7nX0QyNAAmCVSIrqBSrmucqybFqWe3fs2IlLDDvF+TlO7LyJE8eJ49dIYqe9sVzjEjuW6SJbkqWjGtQLSbGTYAVA4NBxfXfm98fcoYOiJKpY2ufzkYjb2zKztzvfmW8VWmt8fHx8fHzkc90AHx8fH5/nB75A8PHx8fEBfIHg4+Pj41PCFwg+Pj4+PoAvEHx8fHx8SvgCwcfHx8cH8AWCj4+Pj08JXyD4+Pj4+AC+QPDx8fHxKeELBB8fHx8fwBcIPj4+Pj4lfIHg4+Pj4wP4AsHHx8fHp4QvEHx8fHx8AF8g+Pj4+PiU8AWCj4+Pjw/gCwQfHx8fnxK+QPDx8fHxAcB+rhvg4+NzeiRTCOB6YBOwTypXVub63eWjD/247tzrCs9x83xeAPgCwcfnt4c/BP5aaM+J5obss07eQsidpGBH1P79j7xvzZrzv/1cN9DntxuhtX6u2+Dj43MKkikqAAGkmsZ2OmuHfoMofacwet+iDDFQuX7jirOvevw5a6jPbz3+CsHH5xmmpOq5GLgQeBy4KxFHncZxDnADWr+ucXKPXT+xx6nLHp2zT9kIaKscFfnBm5MpNibijJ7hLvi8SPAFgo/PM0RJEPwj8AmYM6n/XjLFuxNx9Lz9X1KVPfGftpernQo23IBTsQO45qz+XwYa0wdCT3Q928stAwaSKc5NxNlzZnvj82LAVxn5+JxhkinWAR8D2oDXMiMMyuSACxNxdgEc2P9wKFwce7Ru6tC6gJfFkwGKVojHWq7fI1Vx1SVHv+2cznWHoivZtex1AEXgokSc7WesUz4vCvwVgo/PGSSZIg7cClQDERYKA4AgcB0YgeB42QcaJvaus7VxFJKqgIOifnLfWUXrCRcG0+Sc6vKfDvBoMsUvgTcl4uSeSl98Xnz4cQg+PmeWDwOtQIyl368C8PFkCplM8aFQcXIDs1bqEoWtCqwcvpu6qcPoeafJ2pUURXDONoVkPNwye5MArgX2llRXPj5PiL9C8PE5s/zBE3yvMe9dDfCfwO+MhZeJ2sxhPGVh4U3vKNDUZw4wGagnVhhGoJkI1LOz5fU6lh8SK4fu0mF3nIIVFsdqLmI42kGoOMFZA78mUhhhLLKcvY2vWOHJwHLg6OLN8fGZwRcIPj5nluATfC8AC7NKeB9Af9U52tJFUZc+jOOmiRbnOglFCiM82vIGMoE6XDus0FqEi2OeZwdzA5H10d66TuVZQQGINYO3Ec2n0EhqMkdZPvIgfVXnXETP16fo7Bp5Bvrr8wLCNyr7+JxBkin+idNbJQwC9czYGASgGyb2irNP/mrBAZ4IMBFq2re95bpg08Su2lWpOyvQgNCMRlawq/laAC7t/TpBdwqBRiOYDDYQK6TSUitRtEK/CXjZ6+nscs9Uf31eWPg2BB+fM0QyxUZgGHiiWVYRCAOTQnu6ZfQRsfbkLTSP7yjWT+1DibkLd4XEEzbV2ePLEgf+6b116YNDQmuUtNFYVGX7KV/TUi6idHmBJpYfQiGjGh1xvOzrJgL1E7sO7fY1Az6L4gsEH58zQDLFeuA3wF+wuGcRgAdkmVEbTaxM3S3bR+6jYfIAqweTgdrMEaQ2E3gNaCSuLGuhtAtE0oF4vxYCqV0EHlPBOMCPgUkEeMJBCRslLAQaSxenG1RRGAovG90+Sk93+Jm4Dz6/3fgCwcfnzHBNuDBau/bkrda6gZuJ5ocW28fD2BgcIAosr0v3ohF40kGgEdqbd4hGopF4nkDvA7adqD7vvSeqNk3lnEpGIu3sb3jpT4C3AFfm7YrfIITSCJRwmAg2evOlU1VhIFaUgR56uk8rvsHnxYO/dPTxeYokU0QxdoATocIYm07caDteDi2gNnOUh1a8jYIdK++uMYJACK1oHt9JLD+EJ2wC2kNhhn6j7jFZLQSgERyrOvf+xql9/+6o/A/p7MpcAQeT9uUth7l8I9CXiHO4dI3td3rX/3vD1L7LgsXJ4FSw3m6Y3GuRX9h2S7nn9Feuv74ZfvAM3iKf3zJ8o7KPz1MgmeJy4H8w8Qa9tenD2bMGfn2ZFhYAQnn0V2+gYMcYibSRDdRMH9ue6qFl/DGE1mgBeSuK7RWYDDUwHmqmfeR+LIzaSGFpC+9GOruuP402rcQExbUD4vyjPyBaHMFSCzNjawQPtr2LTKD2NuC1iTjZp3tPfH778VVGPj5PjX8S2osG3KksWq2fCtZfDAKpXKR2sXSRZeM7aB++l/OO/4hIYcbjM54+OKMm0sp4BEmLmtwJWie2kw7G8YRDwYqgpKWAFU/UmGSKMPAQ0AEIoT2ixRE8jC1hMcKFMYCXYfIfrX/6t8Tntx1fIPj4PEmSKZzKbH/zpYe/WXHJ4W81XHDsh44Gd3fT1eScSvJWFIRAYaGEjaVc4lMHpo/PBGoQKCwvj6VdwsUxQu4EComtCtgqhxYSSxUBIYFvnUaz1gFV5Q9aWKQDdUhcFAsFgkaydvA3oBVAJXBbKbuqz4sY34bg4/MkSKaoBratGtrWbHt54QmHSGGUFaMPBg7Wb9YPRdsEWnPZ4a9jqSJKC7QQuHImJ9GB+gTrTt5CbeYIqjQnEyik9tBCooTDnobLiRWGyQTqshsGfvnPs67/MuDvgFVAGtgKfAbox9gppm3Ijzdfy8rUXYSKE1TkBpCzMm4rIbG0i9AKLSRAA7A1meI+4K9OJz23zwsPf4Xg4/PkuAHY5Ki8UEKCMOOv4+UEswbjgYqzEVphqQLj4WV6oHL9tLGuYEfZ0XIdWacKJRw8UZ6Ya4RWRAtDbBz4JcvGtuvJQN0ROrs0QDLFRcBPgYu
AWmA58CHg64k4J4Gvz25owY6yp+lqdjW/Bi0sCrPyH0k0AxVnoeX0nFAArwY+B+STKc4+Y3fM57cGXyD4+Dw5XgtwomrTtJuoEpL+yg3TO6xM3U3L+Ha0kGhpc6Jy06KeG/saXo6SFhLFZKCeg/GrENqblioBlRWXHv3O2Yf33DORTPElIAEEmBvnYAFXlv7+EPD7wD3AJCbmwS1YEUai7UghKIoARRliX/1LOVCfWKqPNrArmeKXyRQtS+3k88LD9zLy8TlNkik2AY8CAq2pyRwlXBxjKhCnfeQ+KvInSTt1hIujxmVUSBzPOO+4Voi9DS9nOLaqrNZJA7+RqniWrfKrClbUQgg27++GmcqYaMCTQf1YyxsmpkKN/wX8HnPTanuYCmyJeW2VwNXAd4AaoT0RnzqIrQoMRzso2NHT6bIHPJCI0/lU75nPbxe+DcHH5/RpwgzkMYRgNNrGqF7BhUf/m1ghBUBlfgCFRAtr2oPIwwatWDf4G3qi7UILawL4PPBFJR1RkNO2XFWUoWMBlVtRnqYpbA1ox8vZwAjGfvDHGEOwwAiPifkNTcRRyRS3AfuBS7WwGKpYO30dTk87IIBL7hh0DyhpFx0vu2/Z2Pad4eL4rXuaXjUM7E7EKT65W+jzfMZfIfj4nCYlg/J9QAsmF5GMFEa46Mh3EbNssEUZQkkHx8sgtKIoTZYIC5ee9vePeFbQAiqlKoqVqbuoyvUzEWziUP2VWiNue8nBrx0B3uQho0o6Mm/H9CPL3zLqycBrE3HuTab4LPBHGOEkgRBQv9jgXAqe21Nqs4l1MxXb5qeuWCAkAu4UG/p/QbQwQtauxFJFbJVHoOmt66Sv+lwPk67jY4k4+5/e3fV5PuDbEHx8ToNkihBmAH4pZnb/b5gaA7rkpTNNwYowFFnJ3oZXUJBhJB4Sj1R0pfKsoEtpdt8+3EPj5F6CxUkapvbRkboHJZ1DdHZ9AFihpPPJwYq1N+5Y9vr/8mTg+kSce0uXOAa4mBV+CBgofV5AIk4a6AS+gRm8P4RRe5XRpf8GMCm5y59ZmbqbSMHEMkSKo4TcyVIiDUH78L0Ir2A1j+941YqR+3fv7N3/50/j9vo8T/BXCD4+p6BUbezzwEcxM+w7gCPAe4EoWou1g7fRMLnXuHCWkk8IIdDCYnfT1TpUHBdFK0wqthotpAvYATfNRUe+g63yKGGjhSRnV6QfanvnqpLH0KnaZAPdwJsxA/n7EnEefpL9uh/jrQRmdfBfwNsxkdcC4NzjPyaWG0JJG1vlplc75dXQWKiFmtxxBBolLB5teWM6G6jO1E/urwx42fRUsP6z561o/eqTaZfPc4svEHx8TkEyxVXAjZjVdBWLZTLVmlh+EMfLsqHvF3gyAKVspEdqL+F4zQVmP6V0+8i9uiI3IEPFCULuBLKUqlojChL9JTq7PvMs9asC+A/gHOAXwJ9jVj9/DVwA2I3ju1idusOk2ACUtJHaZF06XHMJHaP3oZBIFEIremsvpTJ/kprMcUDjyYA+GL9yy4aOtT9/Nvrk8/Txjco+PqemATODnlPtXqoizROPY3s5hmJrmAo1IrSHa4exvPx0HeScUzl9zMb+n4m6zOFpgeIh8YSlLO1qiX4M+D/PSo+ARJxJzIpgNjcnU5zE2Ensk1XrKdoRKvKDTAXijIVbiOWHKNoRsk4VbWMPTntRATRO7iNcHDNxFUJgqaKozRx9K/gC4bcFXyD4+JyabcA4RpVi0JqN/T+nIncSoRXLxnfwaOubyAZq2NV0DWsHb8XxcvRXricVXQWA0IrazJHSCYxtV6JAi7wwRt4v09m1MAvds89RTKwDACPRdkai7dNfjkdap/8ejK2lZfwxABQW4cIIWtoIFFpLQON42V3PVsN9nj6+QPDxOQWJOKlkipcA7wa6gGbHy1CZ7TczYWkjtUtN5ijZQA0T4WYebHv3gvNoKLmiupQLqhVlSHlW+EikOPqPnCINdTKFvPTwN94stPfSE1XnrjtWc8FOhPyLRJyxZ6DL/8i8FBhLMR5eRuPEbhQWQmgEcCB+JatTdyF1kZxTdW88fegfnoE2+jxD+DYEH5/TJJniHuBCqdzApYdvMLmHkAgUextfSSq2agyoXur4prGdrB26HYHClQEean3rWC5Ye1EizsFTXLO9eWzHtpXDd68o5x06WnMhx2ovLgDtiTj9T7lDPd3hyWD9BTm7sq9+02t7S8FsKYwwmNsPrViVuouGyb0UrAh7G19JOljHWQO3UJc+hBIW+xteylDFWixVOHj5of+8zLrso6kl+nQp8FngJRjvqM8CPwf6E3FyT7k/Pk8bXyD4+DwRprLY5yYD8U9MhJpDx2ousJaN76RpYhcCxUDleg7GrwIh5s6staYufYhIYZSJcDPj4RYCbhrHy5AJ1CotrPcn4nxzqcuWPJy2bTzx0yursn1CSQehPfJ2jIfa3gnwi0TcpNJ4snj3/ssyV4Yes1ShToDoq9yw62DD5r8A/hYTgCeBivL+DRN7WDOURCOQyoNSVwtWhIPxKxmNtqNm8iLtSsTZsOCipk9/DHxRaE9qZnJBYaKip4DXJOLc81T65PP08VVGPj6nIJkicLkV+Zzl5T8Rcicj4ckJGib3lNQ/mlS0vSwMYJ6aZfnoQ6wYfRDQMCrY3XQ1IzNpIzLAD5/g8p8Hrsw5VaIq1wfaePPknelxuu6p9ms8tOz7Vbm+uCyV7Gwdf3R9Xbr3f/JOxYE9ja9M5Z3K5cxSHYXcCYRWKOEgyRl5AITdcdYP3MSjy9/CZKixfPqVyRRTpWP/MxHnE6V7KW0v+7cb+n4hq3ImOWvOqWJ309VMhposjBfXtmSK7wNrgNuAz/nR0M8efmCaj88S9O699zsbT2zNeVh/ppARJYzB1NJuySQsaJjaz6qhbdhuZsHxTRO7KNc2RmuaJnbP/jqIcfFclJL65qOAOFx3GROhZiSKTLCW/SYpXQH4f6V9a5IpPpVM8ZfJFGueqF9D23/24ZrssavkvPrNYXdcVuROru48fEMKrcuzdA0wGlmBFpaJR5h3PomiwfRNY5LqhTA1oyNAVzLFj0u7fqFt5H6nIj+AwKT1CBXHWd9/U7kuA5hJ6ruAS4FPo/XYjsMH/1r1/PNqerqf0K7h8/TwVUY+PotwbPe2zzeP7/yMSUlt6hV4MmCykWqNKwI4OjuTYU443Nv+flx7xjv13OM/IpYfQmEjcY1qqX7z7MucSrVyHvAw81RQCOFioo5/BfwvpcykmLKZxrUHvgJ8aqmaBl7Pv4xKvOr52zUSVwaKAZXL3tv23o8HvMwNAi0mQk1oYVGVPcGawduJFIbnCAUNHKm91DtSd9kUxkNpfloMMDP+3Rv6fm7Xpg8j8aaPVTJAT8cHTPzGnAYpzh74NbWZI0jt5i3t/TPwqXI6cJ8zj79C8PGZT0+30zC595OWdrG0h8ADBGmnhv6qjYxEVmDp4twc1LrIsvHtc06zv+FlFKwoFi7pQJyjtZfMv9LeU7TiU8z39DFqqT9PxPkgsBO4GxgGVmMEg8Skw/4YZpYNPd
0imaIymeKjyRQ3J1PcKNDzRl6DElI5Kp8HDlx25JvXnHvif7Mb+37Ghr6fI7THeLiFw7WXLVghCGAyWP+7mHoMv6DsRjWXWwD7ZOVZc77USMZDzQuFAVCdPUFt5igKC1cEgp6wPoapDOfzDOHbEHx8FrLJ9vLTKUgFkLNjPLr8LWZQ1prGiV2cNXjrnIOUnFuBMhOo5YG2d2Pp4nSw1iw84AMlFc91wCjw7USc/KzL6ll/A/wL8PfJFBbwXYwgWCyPtSO0t1H3fPUrIN5zyeFvVO5pfJU1EV4GwPHqc73lY49M71ywIv8tlfuArQtvFSZP0jeBb6EpKiF1VW5AVGb7GY+0Mh5ehkaUEnTMdH1T/88bk2u6/gH4PlBkVixDifbStUBIypqJnF3JrubXLNIFmKvSEihhhw7GX3LNWpOsz+cZwBcIPj4LGZOojIdVIVESBAfjVyC1i8KmKtdHZbafvIwSVGkAsnYlx6vOBa1pG76PZRM7UMLmQP1mhmMrF7vGz4AaIFn61wY+mUxxfiJOAWNQvg6jj1eYFcEfYyqafQxTMW2pNNZu8/jj17oycLZG4LhZsX7gJu5r/x20kByqf4mVDtS5baMP3hEpjn0zeMkHyzWbvwJAT/f5gNBCzhRjLgkzT9oUZGS63wDC2DOqgS81TOy+qH34PksLoQ/GXyJMUJsmlk8R8NJUZY2XbNEKg9YEVQbbzbJ2+DeEi2MMxVZzrOYiEIKxcCvpQC3Rwoj5UcKt9Fdt2DSdxNvnjOPbEHx8FqOn+48U8q+UsMIDlWe54eKkU5XrwxMOUhURaLSQZO0qeuOXMxpejhCw8cRPqc0eBUAJG0/Y7Gq6hkhxjKIVxlIF7VrBTCq6ahwh7gDeUJM+EqhLHxI5p0r3V2386lUNgT8AKJWx/EvgeozAUJio5gCnnszlWkcfCraN3C/KBu1S6m08Kzh7vwywJhGnr3Q9AVQ2j+9Irxu87T8V4o2eDMTGwq16V/NrpKUKbDpxo47lU57AswAtIKtBe9iqaAUrQl56ehmkASUCJhhPCDzhINBYqmCSAApJ3q4gb0WpKAxSXggdqH8JJyvXAyCVCfpTwmIs0qq1sD6ZiNP99H5cn6XwBYKPzxLkHviv6kdb3/SppvHHP9A69mit1G4OdExgZrhaCzPQlgyidVOHOHvgV1h6oZekh8RC4QkbLSwGY2vZ3/iy3MqhO0KtY48CAk86jIVbU/WbXltfPi6ZYhizGnhSRPNDnH/8R0jlmsE03MLOZVvmq60AfpOI84pkimXAg0AzoNFq7/LRh79mq0L+cN2la7Sw+i868p32aGH4PcIIkhCQL8rg/witPqKEZVmqYNJxLMF8VZNGsLvxatYMJY3BXFpIVWSgcj0HGhILDzfBa69PxBe1UficAXyjso/PEoQu/sDYZc1Vf9Y++sD3LF0siJIwALC9LJZ2KVphYx/AGJbVEq+UVRoopfZQSBqn9rJs9JHg8rFHTG1mFJbKU5M5Gpt3aNVTaXukMIouZSkVKEYibYsJA2C69ObPMMIAQCDkWcdqL/qn3vjlTZvrrT8BfqiF1YAxWoOxgci+qk0tSliWFtYTjtLz7A64OKwcvhtH5XF0HsfLILXHRKhp9m4aE7D2d4k4W3xh8MziCwQfnyfmRzBTBcfMdCFvx3i86TW6PNCORNooWiE8rOkqM4sJCIHSSsGyse1i7nbIOlVWMsVdyRTVpWpn3rzD539elOaJxwGBa4VRwqZpckk7bLmBq+e0RXssG32U1YO3f6Z/580fBx7a0/jKVxatSEQhI6XjulOxVaNaSG15OSzU/NF6+uNiosiiWMqWOnOUQprVwgxTwD9gbCc+zzC+QPDxeSI6u+5KB+q+oISlPWFphU3eivHQircxFWpY6BoqTDCaJwNoZuyy07WWlSccCkTdsQWX2t18jQNcgdaHzxr49bpQYSwzb4C0wAzYAGhNTfowy0fuJ5obnN4pb0VLfkoaoTUFOwImc+vUvEuWG/Ho7I1rBm+nY+RemiZ2O/VTB75Umz4cTQfj+YeXv2XqYPzKfuBNwBcmQ43dO5a9fngivKyohI1rhVVRBAsamQV2CGNn0BjDs+dha43AlUHSgTh6dpgFAi0tbF0AyGJqRf95Is5f+dHKzw6+l5GPzxPR031VDH5fgQuWkwlU6YHK9Z7j5ez8LFfTmsxRHC+HJ4zHpaVdBqrWo4RkJNrOeGgZAS8zcfHhb1XOPr3AWIv3NryCTKAWtGb10Laq+NShB+PpQ2Kg4iwO1CeoS/eyZuh2Am4ajWA83ELGqaZlYiegWTncw754grHoCoajHboy2zce8tIVWadS9NZdfi/Ga6kVIxgimNXGvyZTvBWTw+hKSpPEunSvKZcpRNHSRVmXPmSPRNvJOVX6RM35fWvWnH87QAJ2JFMNF/dVb3pTdf+JzwS8bDk4rh/oTNvVXwh6U2+U2juys+naKUcXXunKACPRDqozx9gw8MtZCwSNJxyGox0uxk6RwRQn8nmW8AWCj88T889AWGOlQVRHimOibfQBu230AXY2v46JsFG9uzJovI+g5IUkOFp7EQV7xiyQl5UVVknrM3tpUbBjxZNVGxyAWH6Ixsk9eMISIGic3EsqupJ1J2+ZLnIv0FRnj1GdNdXJymELq1J3oEccNEJ4MlB5rPq86xHW1otb4uVhdyyZYj1wLsaN9WNoHXC87LhrBbUWZkWTtysIF0cRSrkS5eXsSoUZL/LAb5IprgVuTsQpJuIcJr76S/SruzV8ZCLYuOZwXWfbRLBp76b+G5sDKis9YbUsH39UbW+5TpSvMRptoyDDhFShqJESIeRouPWhvFP5JYzt5KZEnGNn/Nf0WRJfIPj4PDEVgKekE7NUHrRAI7FUkRUj97Oz5fUAjEaWk4qton5yPwhBb13nHGFQQmTtCi/sTlqzthWDbnoSrWvLpTfnE/Qy04Jg+kTAzPTa/GuhKJb2dFRORorjf9pwzmt+OvtciTgnkymOAZ22l49u6P+ZqMgNhow3UitjkVb2NL6Cs07eosLFieBweMXQycqz3olZPXwR+MPSqe5Ipvgaxgvq9kRnV8/Jnb9+XV26910b+n5WyvakTaSxDBDLD8lYfkhPhppcSm60QS+dlZAF5aKpaJ7ak2yOX71kbQifZxZfIPj4PDFfBv4KtGUiZkumN8FsWzMIyd6GV9BbdzmqNAsOF0YpyiAdw/cSKY4wHF3JIy1vsc478SMi7rgWph7ApED/G0KcDbxqMtToTAXr7Vh+SABMBevFUGyVbh17RMTyQ3MaphBZiQ6WWqM8bMtRpqSARoBemKYimeIyTC2CaEvpnCUBQjx9kJrsMQYq1/Pwird7GD1+GPg34E+ANkybA8AbSv8pIHVP/+QFl6QPfQLlYc2yfUs8ym2qSR9Rk6EmjUm58QFLu6/ClPIMYlYfNz7pX8fnjOHHIfj4nA493Vfuq098vyiDLWtSd4I2gWk7lr2OqVLa54rcyQNV2ROrC3YUVzicNXhryfgrSqokM6c/GL+KgaoNRPOpgxcf/e6XgWPJ1X+QQ
xkolXIBkkfwS2kkxUcjzNBf+CFJA5GjbqQjah4b/Q7o9Jp+5k4rDBxGEY2lHqDppqqq5DzaL6a1dSCMMt5Wef6B9CAvqlvqRtwAd2RM9/+XBkad42QmqwfoW9u737Ae9+piGRYh0lGnakMEJL9iCmW0DhYqANA/1OpC4AgHiU0XiUd8WjvFgb1n9NBDu0qR0M7eAYQbZ3XsDu6NmmVuYUa2jQnuCk/b+ge9c3OK7/d5i68D0l8bUwRU1ZcuD7es9BmGLrEZdapTJ4IvA97cUFSie1AcNqugvId5NOgyq3sKb55bTEKCj5CT668LJyKpC+3hetHPrrmwNuLmPqwnbHCL7Z0oVYZ3rnG42KdVmxSE3g3fT1XlptbE8XHqX3zxEPgZUNNLO3ZROmdqZanFq6QNgeY8H41hhSYPfrRIqe2md97uC5LRCKuAWf2LA8l7k00+V04D88GopfM3tArxKKubuZfFzmuZfmhN4bbmsC/h/i8soigbWr5zPIYwah3b0WM/gR6rtc2lYJ2+j84PuXjwcGSSbeQDKhPOHwNYr9cwNI9fJ8sQpJGbI0RgOFyaAzOdTq5tNhXKdFDlEiDAqVJLNA40L5ibTjtizTHuFecV/KTQS8MQ5QLFDaSHfPl49bdfydjy26/Ly7Y2/40taFl/6bbYYvm6GC+pOU7FOOEfRC0xrA1VKGFhisW/6aRIoNiRSLEynOSKQkyygeZWRPxzm/dCOtmIEg+cZlZMKddBUO0V5IobQYoWsGbqUxdxjDLdAxsYOT9v/yykMtJ/wQZYwjG75opX29S5EK5tnm9gsrP/D4kE5FuKd8aIRGdtQmUI26w/PgTO+JXkN7UqsGbrvtseS20uy6ryDzxFWwOmyPf74lc+D9iOBS/gVKKwBKpJBCWn8eE8Sj3IFUqacA9rWfoR9Y8vKyWzO0Q9CerEOSVVqB/0ikplHhP+fw3HQZTccDTM9fLjCdEvl0kgkDeA1zS4WrzL+eL+ZbH+ATlpW6lea96x4zdPfkgH8nmXCQcv46arMO+9bYIUQYVx7XilR+LkeKnXzh7R83k/99CokUmxGLbfc5Zvi3lpu/Au0GlDcKc/yAoq5tupVlVJnaSkFdu6dZmDgwGcSJePdzE3bmp4jW7vcEfojunoz/9XiUeymhf6gyVoX06y0Lruetena3d7PqyF8w0IYLjqOCwQMtG89H2F41MpcOJ1J8AXj1CW7qJKMuipsdJuymOWXyXrQRQAFDVhtbw8dRVxgBrbE8zbSuMNQcSTvvdztPGDJGdteRG3sh1YnbamFaPm7WqHfq3fG/oaSvAsWCsg6LwkzrR3n3Nes+E3YmzHWH/nQTh36/CLE8GkDi6N4hDYgylSm9YDUBo0WI1E/7Q1/vqxG6kp3AP3nz/SkhHuUGT4D/D177WwfTMvF7eigmgh2lQ7WA6xMpXhCPPnstMmfD88NCiMULSLDwl0hl8yNU34x/iuTJ1yzuKcFUH8Y5wqU8y8BBWlXOLhSSiXaSiff2nKj/+7wF+v7WoA6tbNT1bSE9hNzTcwviprsSWYCPIhv/J5EMjgSiQd+ANDc6jdrPMYhUaN7N9GBkiGRi3Uw0H4kULwB+gQRIv7h96Uu/4TYsqrD8XMhPAkqjHSkmC7eUVSRXukBcDF1QAf1I5KQPA2civEaX0d1zC0J9fBvSm3m+1tvVCM3JNBxo2ci9y159J/C14boVuW1dF07krIZcS3pfo+nkmpB4xHFI/OrcoD3ZZA5tx5w8hDHRjzW0Ha01Lop2e5CgzpOqX4mpC1PzTwFGfhSdmwyQG/cDt/PF1KOyMXXEnahD3v20vVfPvn58gTAnt6qJ3QS8xLNWv8j0dro4EhyarTbIAPZ77TsFfb1vQGKOL0QyAGsK9nngR8j7/nk20PS9bLAFhwAuJgUjwlCxOM1HHfCesl4MzzE8+5XKTwWSXrgGyQz6HDLxv4Nod53MzTqoyl9eA37zivcgZvgK4DCx+EMlY7IQf+HVyAL4hTe2dyGbjLQwLKZIuEBaKV5JLF5kVn0+IZn4OZLiN1NGie82qHbMJJJd8wpP6JchkeLzSNV5AAgvyPezbuSvMLqPsj3GCIB2iow6VhgdXQtOXno6WyFxI9lZdKCO/eFVR/pDi3ozRt2nPJ/wUYFXS/BPNf6sgVfFo/w0keLBJcMPrls+dHdEoyhYdTy0+GUUrKJSu3bkHham7vEKx2SIhbZ1EIiggLsbzsw6mLlzd3xxwECXp7dGommVGYwIjc/84D/VAgE34Gn/qkhbURZXq7VzzHBRrUFrlOFguBZOpUKaUxL4/ihCU3NSpdBxsLSJnaaaBVDEqPelk+nukXqgvt57KNJ3+9fdgViyA8An6e75wQznnBGJFFcF7ckfLh55KGxom/6mE0iHqpY2ucBP4tFpfVKeE3i+uIzKIZvH497PjwG8vswz5ORNw3zu3UCsjnci7qgtwMMkE2fiNTNBtMoFJd95G8Lx30BROy7txuqf8xtIfcUzD3GvXY4I0a2I4NvidbabCx5A6kJqwXctVIOLaEwvAe4kmbiEWHyo4pi9iOCuBxi02tFWBKWMKVoJlAFWHRTGwZDNU/sVySN7EAeCt6149NGZ9oU3nbm07pNzvMf54EGqV8j7+EIihVba2bt86J6NGoXGIGhPsGD8Cfa1FssGDoWW0Knux68mNrTGEEZUkqGYtlUgA/yziT5XV9Q7KFwFyqWCTmIu8KdnkIK/afr7fghx0xpI/GumDbkq/Leg0HjCQCPFeaGSlNFrEKWqkwqXrq0CGgw7a9UH97RsmuyY2BFozewLqHIhlTdEeIUot+gPU57dBMVMsHbgq/T1HqS759ayQff1RpEg8iaER+0KxN05UMqPtOHgjQfHwl2Ybp6JYJSCWd04s5yc0ZA9dM2BwztXLxx79FsG+rsVnE3PKp6fFkI1JBMTTJ+ktWIEtT6faTH78F1NvqluI5kcMZ66gP0Y0qznmX0ZycR/A9cii8fva50GTpo1LVRcPX8GLqj4i78YYa4xGteGQmYM+A6hxg4ks+ezica43TW29cbG7OELMoFm42DziXpZfh/LJh7FmBxQKEOCxK4Nw7unLqQbFkBuTALLygDXRqOwzYhWbgHXCN4dOuNN1fjvnxYSKQzEinwf1SvfM4CptOucvevrEXFlGRjY7Gk7k/2t5Yk2S4fuZdnw/VhuHhdF3mpg68IXO2Phrs8CH4lHKdDXeyGilPjulWFkw14AZdbobCnYc8UBRKM+pdpknclUrHFsBql78JWpDDJ/pjJfNdDftGHP7vbuO/NW/b3Ah5vT+zjx4A3tprbLLqlBa2W5BTP8lbA98U5ESUwhStvDlCttpSgA76e757/KztfX+0u8OIx3oTzyfAvAG+juuZG+3o3AHdrbfzSKjNXMg8tegV0iGFrTezmu//cE3Kw3ViNt4N4OvNhjhX3W8fy0EKpjD7V5Y3w3sj95as1bA/GRn01tweBrGFbJ/9fw9IpQ3g30Mbd2jE8fycTrEVfPC5EU3TbvLwqZ1H9hFtpmJEPqzIrPpjS+kvNVol
xITAzARD9AE6h/pL4jS11HBtwT4smv/wInu1nSFJVuyh7KpoOte9TYfgg2rqFxgekHj1X7aoklWCFhJc2NeVW4xRCP1loZoC03O58e2XOG5376d+DfEyl2IUoCLem9LBp5ROetuvCe9rNUwYzo3e1n2isG/2qBS85q4HDjumnn29d2Oo3Zw7RN7sUxAgScLOsO3bT13hWv+WB8e+8pbOc9yGZ6JcV3+XOk6r5AMdMuj8zri5h/9l0lFns/TxveRAhTHuuIUFFDpKCwaOyxaxZtuOjuRIor2id2hY7r/32DiV1tfikHw3is65JzTj1w/V3ACYiS8wQyLx3vvNXipw+W/vLkji1NK43giy037+cwoOQcw8j+8IVEit+eqwLfNXVhShlVaCL2CNGJXfpQ8wYZo3ZZ3//HKWEgx7kRpPZjCWINP+v4vyQQ3oRUDfsv2p9UflBrKn+7BlzkebQgFdDXML+gu3+t+ZrpvrBaNs/vPTUkEx9EgsVQe2EsJJloJxYf9L5jIXwsEeB/iMX9tMa899mUcq7l+ZVB1frVzvrCwP86TA6EmRwIg7oKZVyG1pYyTJTWumtiuwG6C4DMUBa3UEdrTOHaIgyUKgaTG7pgaLwkD1Fj6YLOm3V6Z8d5D5wwp4f1tLAf6Gof3xHYcOh3AeX586MTO7l3xWvVgZaNarhuGQE7zUTII9PVulj+pbVrunnDcnJopVBKFwy3kKkvDA3Ht/eehATq/bn2UqQfyNVIdtgkIth9n/844vI4qkkkUyp8ye8lKFXCZlp3DmBVWBCOKl9HBhIjXA/8bsXA7SMm9hTpXuk4FBDQebX+8J82anGbGYj1dG6VMZbiljJ3UV+vucxqus107TLFUIN2VKBBCa1GA1r/xDGsE0ynPASmgPp8SoE0NjrxwK8IutM8Q76bbKT2sJ5Z/N8QCLJhfRTZ1LPIRtUAXA/8HgnsrqdKn9oS+ItlI6JN5JljamSVc8wH/pj/8hS++1TgE235ayjIdNdOxhuTH2d4Ek/bBT5EMrECiR8kkEKxeopuiVmfwdTi92gkKl+I9v+r3RBolNaAVl4FdcArN66nkJZg8dBO8BdkeghVQnFdtmFpmx3R8yYGG1Y/E+yTjwEnLh15wCrtchZ00jRmDzFSt0ylg20EjRAnHfgV9YUhcmYDjy26LOdi6I37fz4RciYjrjKGDO00Kmeq0PILCPOvX9MB4gZ5Eml76aP0nUa9f+eZu8IyRNFyrImSdyeEgmKBmCV/ruk2VJ5Pv9KdpMGq+IIJLKSv14wLj9niUgFQDXX2aNX4VY3B+OzE0NcbRFJTzwnZE8cVjJAOuFml0NhYaGUqlNGglMue1k0RlHr5SHgJHZM7KH3PGoPJoDy+5YN3EymM1hgpt9HdM1brj880nv8CQTKOdlDUsC3ErBtBFs/NzJ1/xcfVzJ7aNq3ohvn7aDNIAc73gR0kE5cj5uPPprTzo49hRJssRem4HSST6k0kE7cggbcYxc2nGQmu/8A77h7ERfE1JBA4o0uibPFbEVAWWttFE6M4JIVSYAQkSGwEIVBnqMxgw5TyaQQleFyqndlZ+bGk7EGhS7VHdfzhP9Tf3vj262ca41HCB4D2ghF5MRXzr2BGpoKbK4/cTkNuAICwO8Kaw7eEbDOE5ebDjrImLF1oQpSFG4GH6e65m77eU6pcL1bxe+lm7Adw51rzkgI2IxZ3C9Pndak7ysdeRPm6FvHXlwWlK62JkjFOaLEyLdsIuntazxhfNnRvU1BP64l8O8Iq8G3AKP1bjVqEGeMZJX+7DfhbuntGvN+/CbxCQ1DhYrl5XTAiGDg8tuDFZIItNGcOkgs0MhpeZFpODgMHV5lewaDGVQGywWZSDWsBCDoZLW5PA1X0LGuEh+19NYb5rOD5H1QWOuxq/YY/jKQsVi4UF9n0ZvOlzub+mVvAtPZxI0i/2n5EcF1McePQwG+A13iV108PouX/K7JYRyn2NJgNNlJ4VhpP0MjiPJuilvo7xEo4U3vnlXxTpU20qrV4NeDaOW1ODihVyIKTFfZtL7MGFG5rDIL1sruM7LFVdtScSqc0Q9C8RPoWlJ61fZ3GCiomUzB+oPS6/uYUoLvnGWk69OjuHZetHkjcEHImDYDBxrU82nUJKIXhFjh719enKA982CqAoR20slxL58eAu+nuuWTqgL7eAFIfspbifc1HGSlVsEvXg0Y2+/M9wbMYec/vQFK8g8h8+E/vp1TQ5YCHkLqU0gD2rGNzIO2qQASlUBoMXUCVzxsHKXA8H6kPqXozZedUASZCURpzRzB0dU4uBeN09xT5WcQ6GNblblAcFWCwfgUag4g9Sqp+lWSEKcWaw7fQObENjULhMhJZwoGWjYyFF+AassUsGbqfVYN3lF7aBr6j4GN09zyneiU8/y0EcQ1Vm3RvB7qqHO9P1ml51SVwmd20nm0B+j7UPGIJNHnfSQO/Qkzy3yNuqUoSPYWkt30VnkK+cjKxHvigd+7/QlwJfsHUcubu2rKYHkBUCLlcqQZ6EZJ++i/ABqH4sgIRcrMKHW1FoNkzWOys9CJwvbBPsB4CdVPqLShXoSwMkym+pUA9RFohMwRa5zHMj2MFlwJt1EeHyI1vID92TsnDPfRMCINECnXigV/F3JaNr9638CJ3RW6XoQppWu1Jjk/dqg80rFMOFtW2M7/yGJ03kHnznbIDunsK9PWuxww/gpPdQHWv22wBfY3EGnxp9BeEgfXPdPfs8q5zAPgZfb2/Bi5F5ukfEAWhQLlACCB5/nO2mv07NyCstK1tFcbSGaPiSw7ivnwcsfYrMwE1En0xXAwMNGPhhSSjZzMWXkhTtp9AYZLVR24r66EAsLP97C+u8n8RIXuBLgadAXAxeGTRlSwbupem7CFQimX5+3AM8bS2pfcAGq2EjTXoZBipKxrgSjssH7qn8mEYwNnPNWEA/zcEwu+AQcobnmtmdhP5gTYbESifQyqhT0VS6k4/CuPKUewFbSCZRI8gC+9ViGlqUlvwKOBlJBOtxOLDNY6ZjmSiCRE0rd45NiMuKQvR3ucb56iayVHxfwPYQyx+jkJ2hoC0Hp1RIEhEzZWTOHkY3Y9ycuhAvQgJw5warAJo6DSktaWX+du4EJRyaFo8SV3064zt+zdOfVN5LUMsDn29/w8RrAeo3pHvqCKRYvnCkUdubcoeWtF86PcaM2gQbsWcOAjaoYMDKjryCBmreXrr5ukwqNbCNXpcE0M7jqty/GcRTf2bTI+B+e/rYaRw8iuIBX0J4ho6PCUMStHdk0+k+M1UEV9f73aE3LHXO8JP2ph2O7WkUgUMhXYCbsZV0+eniazlEYSB+AlEwQkicZPXDUeWnh900p8HSLafzWBDMZQyGlkMERiuW8op+68nbE+ggb2tp7Kv7fR/3pfix/HtvY8CP9RwgYNZZ5ZkpuXNOtYdvpmIPYpGecapJpa6A6UUyrW1gascpdHKIFVf7pCoz6Wo0mTI4DlKjf38Fwix+GGvQGwrRSFQQObdTG6fNu+4vwAfJRbXXp/me2ocP18EKbJhWsAgsfgtJ
BOLkIU0l7hGCHiEZOLTwJfnQq2NpMA2IpYIyP0PMr/K7PliL/L8SzFKlY2sSmZKAbAYO6CwMwoUKjeGHngcrCA0LRFLAVBW2CK6ViwJMyg/Ert4D2teVLu4p7vnXxDr5ZmBdj8bG+pbAWgHUwXsLGryEH7FMchzqLMl0FhLnS/5bHoG2uCTb5TeFdPwMuR956ieFFEA1iEW9DeRlNW3IXPj9fT1vpXunp969AodwDKlne/EBv+6NJPclg7bY39U4h48r+QaCi9HatqjqHFvVWCq2mvVH4sfe3ljwQgN3xV70zJtWBchhaKjyPer0tY4VoQHll1DY+4wBSPsVxEr4LUDDat/2zmx4yIg7xhBw3CzWqOUViZhZ9JXWbygscxeSxdAg61CaO0wGWzDcG26RreitMvetjNAKSwni4vCKBeDmmdyPs4Dz3+BIHgXRR+oQjbb/0Tu72NUN6l9YqvbgE6SiRTFtpwZynOjcwht8xVVzlULBqKl+4Usfd7nJzM/QrvFiNvHAP57DsfvQRZ9HcWmQr8E7kfSaWdq+j4faITOI4RPB55MtCDuhLsQl1VVhs2KByiC0fZehxdDUGjZ+Id2Qefx4DVSx7CkzkCwDXj7HAXlMUEixanAixHr43vxKAVgkdLu1Gx0UaAsTApVz6FqKC66+LcX09d7BvAPdPc8Kn9031hjSDGmd/krdV/6LsowYi29HJkjOS1K0g9TW274b2PBpXWuETABa9HIw+6i0UdCyrXDSDo2iNAZ884ziSggrVSJzT3VKGW1ILSrzLWGdk67c9VblgIvQp5bBqFAeQGy1qpmB7qGVdkCUwHvSrZ3vzk6sctUuDmFdhXadJSFqwKYulC5mZcN0dQ52zHCgYCTIWJLuC829FdA6b3tZ6iJcBd5s4GQM+63GXWAt9Pd86t5P5BnAM//oDJAMvEzhGGydK/5IsIrsw3hHiqFRoJjpdrEk97xv/LO01rxnWpZRbOhgDRP/zqx+P3eWNd4Y5ov/kgsfsnshwHJxLnAZ5DN9pPE4r/yPl+FVBcvfwrXh+rZJZVwEXdcL0JsNzfhN94P6UGmKClK0bYGgtO8Txq4kFj81ulfeGaQSHE64rIMIhbYL+Pbe7+SNyM/N5zCMr9wSqMYqFvpdqV3Ts2fCqE4qWUTmxIK2gy7huuALhRDKLLxLkWeaf/001SFLyxVleMnkPTYDbpCs9aAbYTZ1nWRjk7sUNHx7ZS6UjxBdQTR3DNAwYUGNcP8kJswyJn1hJ3xaX+fLXPIURJqG4ksOvTo4pfUedf1GwF9ElG6Isi8u6jWOKZdRmt39ZHE2JLRLYZGGXkzEjBdOyQ5sQU1g0AAsPe1bNJLRh4I+HYEaDJWk7N14aVP5s26b3aOb9uyavDOF3jX+x+6e54TRWjV8H/FQuilnJnSBc4jFi+QTHyOoq/Th0YmTgTZ5EaQOoXrEV9qtY2/ljCwkQpHvw9A6XEGsfhbKo7fwfyzQkCySuaGWPwOpAKyCOnb8DZk48lR1Fb8RueVqKZpzWW+GN45L0eeaydzudeGBeICSqe85vYlMIOaostLef/+12dcGPT11iH3c4DungJyjyFkYzXR+mUaLgg6mRZdEnRVaGYQBiCpwB9NB1rfbpjBdWE3U6cKk0aJcPS/0oCkXX6p2mmqbVuqmHJabc41IPGyiSrfI+BmWX/oJjUcXlQmDCjGDOqQeFW6YITXj4QXHt+e3l2Wj+8qC63hQMvJJNvPxsDFdHP6rOS3lVEuYGa8D8DLOzI40LKxHbFQQojV4wID8Sh3AiRS7EMs4lr07aVCUqOU2tF5wdiS0S0X7m099fi9Lad+Y/nwva11+eHAaKgzt3zkwZDSNsU0Zo2rLBwjmNuy+CUD6VBH16LRRzG13wEVwva4uWnfT9co9LuBS+nu+WCN23pO4f+GQIjF7yCZ+AniP7WRSeI39EhV+UYaiR10IwIgimg2c6HNroSFLKoc5cVsmmqbuMQq5sQRXwIX8ZU/HXwEqR+oXHt+VWupUMh4n4cpT1GcqxAzkZjCd5Fud6Xut9LzFeH1KiDcAoPbvfaWCkJNP8C0FtuZ0eP15JFWB6MwEer4l+hJV3xujmM5OhCXzc+ROoxd9PW+mDU9h5D5ZgIh080fUSIw0mp+RY2LFBypP+21m3j0+s8wPjZTbvoViBtxpiw5Hz7v1jDwn1pSj6fmnfdF5UK41kkULi2Z/TgozOJWrRBlIgW8LrGmZxTYXZ9L2U0HDlvSGU6Tqo8Nbl1wSbtUgChvQAauYfHQkpcVTuj/bT7kpG2kS6FPce+4GM0GxfiIg0Eyeg4KGI0s0uPhBXmEwvqLQGM4P/L7E/pv/BPR6wCIR3lyx/YHLlo0+sg3TCe37HDTcY3J6NnoYo/mSgUoC3yC7p6dyRSvASK7Os7Lec8qNNIQY8nwA2gUe9tOJ2/VE7LHyQaaQ44RXAqwtetijj/8B5R2Ub7LU/aUBQidydys+2cZ/zcEguDvkEl6HhJk6vE+v4npPloTWdjTszeeOkLIZrcP2Uy3UYMbH8kCmQ+VwCTS1OPp4MVVPgsg7p0PINZDJyIUQ97PU3GTgWwYH0HYIQ8A56H1yTj5blw7QCGjMC0INTOtU69hQnSdCATDAsNamcxHPrlk/InfgMYEqzlz4D8GH/7VTe0nv/Sx6Zc+ZvgC0KhFm16hJMvmvQglwmVAyjFD1yE9Is5g7lkk/qb9duA3zsTAG0ofeJWN2qSogPj8PE1ayPSqJvVIbEB9QqONygM0KBczaOBUD2xrB2u6zm4jadPNwIVI0VzjZCg6+tCSq2lN7wsVzMjwYP2Ki1HGA9XueTyy6MG+lW++Or6994BP7PbEzkcbOsee/EBztv8DkkIqj/Bw0/GYro1rWGQCLZnlqb7vxYbv+YiG/S5qp4m+HLiUvt5euns+ArA6dfsX8LjNlo484NoqWEg1rg6tHbiFkDPJocb17Gk7E5T6HRLvOpBI8SK0uyBcGK13VMAoBEQ/HA93sXVheUdOQ9vU5waZDLbhmCEGG1dzb/BaNu77CSFd1nfHYH4szM8q/u8IBCngurbK5ymSiXchqaWKYsB48zEYhUKEzInE4qMzHPcK4OtIQ5RxJBDoZx3lKdYl+MHvzJwa8cyMvyJptaUYR9g5UxRdO0ejg5tYTcKYej19vX9ANq+SzCrPImiqwpOmlPQ0AHKEVk2G2v9To0CZGq1RaGUboU2UNEA/VkikMIHLzzbrVhluIewagUbDLajRyKKrgA/Eo1zrZeTY8Sia7bwcUUbWIBxDswXxfZfOkUSKzvO1XaakzOBbDAI/pbvnY/T1nqLgJl2eeg1ltBAiDBxMil29BCYOtgqgtIutgpjadk0KjqOCytL5anuESXHvGI9v723d2nXxPaORxadngq3ZTLB1DHhBPMpjiRR3IcVtPu4Gzp5KYY2K3pZI0dgcaLqzJbv/RP9+NYrhuuW0T+7G1HmURsdSd+41cd/sjQEDfaYDExojoOBD7l+/8h5LF/Ypef4K+Y9aOvLAWNfEkx1BZxKNwZKRhwg4GVrT+84N
2RO9Dy59xZpMoCl4xp7vRUP2hALY13IKuzrOn3bzbZO7WX/oj+K7NII8vPgqssEWlg3fWykMfHy92ofPRTy/BUIysQDps5sDdhCLj9Q48qdIoKmRmc3s6u6M6ZgpnTUMvIBk4teAOdX4Reiir0EYQu8FLp6iu04mTkZM4Mu8MdyFZFD4uL3mSJKJICII24DfEIvXCli/H3E1XO6NfRwJpP8cKUgymT93UzX4zzBT8lkv03o+aIkXpIdEKNRVp83ZHVzeMWq2dLjKUpabUwC2GbIdI3jM4weJFMuR2pHGZNuZrBy8C6UdFJp0sHVhwMl8LZGKvC8epcieKrw0n6Cv941ISudc4kWHCLe8v3virm8rwwK3ejZSFRz2rvkgfb0HmC4QpkFN1/YBMNA80XUhawduA8MwHEKG5eZqufMV4mrdg8yfyPrDf0q7GG4yes5P9rdu+nw8OiWsLwU+gVQw/wb4TDw6fRAt6b1vPuHgb04ofVAKrdOB5scas/0btDId080OG+gYYJaewIAG3yAzdD4CrK28QEDnw6bt5AoEQ4ZyMdwCC0cfxTWsJtDnnHDwRnOobqnyhQHA0pEH2dt6OrZV9HgG7DSrjiRQrotrWFhOlqXDD7C9M86C8SerPSuNxCafF3j+CoRk4hKET6cZmaBDJBOvIBa/1cv13wA8SSy+16tVeDGSRbQK0ZSrdbAYQ15gE8XAWSnckv/PVMn8C8TN45BMPIZYBFchKbBhhArgeyQTO4CNxOIPA8XgswiPNyAxkW3AZ0kmjGnplXLc95CMCil+SybOJxbfMW1EsfgkUuhmIBQA9UCCWHySZCKDPMMCR0cobEfcCD5maADkwth+CNZp4R/Cy8yBHCaHggs1SrFlyVVqyfAD2YCT2ZcOtv/j8vVnPxNVnl/D0/D7W04iZzWwfuBmXO2yYGyr2ZgduObhJS+7MpEyHwC+GI/ys5LvDlA9Ppql2I7VRVxrX6Fl+UcCOn8BrSsk1Vb7ZL01PU9b8ONKQjFx8myBWY2acsNUwtA2C8a2ivfb87Vrcm5lbUCJdFOU0MIoqDNxJ1enbl+/es2mKcstHmWMovu2JpYO33+eoZ1pgnPp6MMfRyr2fSXsKaUYZ436QMGKDNfnBzuV1qXxCeUqKxBw0oRtP/PJN87BcvPYRKjLDbJm4BZasv1Tf9NuXkxqe0wLm21V+TmE7CvPCxxVOtxnGN9HtGITuY8o8CuSiYuQSswfAVtJJr5HMtFFLL6FWPz1yOZZq5/qJKKdD1B9w/cX8Vw45eu9nw1I9ejLKWZm+FhNtZ7KsbgmFv8WUtHcjfjhbZKJ271KZEgmmr1zX0KRHiOMWBm1EYu7xOIJYvHfekICxIc6QbGg76migFS/nlwhvL7EjAtZQ278F0iQ0Kcr3/Fw/am232MuHWzT27ouuqHjpCvWLl9/9h+fxhjngzLqE1M7oIW8zMWkPj+o6vLDDYiA/XEixXUlh/8O+F8kgcFBArv/higbxwOvA85NrOn5cWJNz6k5FTxfo8IE6qBzA3SdCJ0bUEagmrngAlfT3ePQ19sFfHm2GxkJL8RWQWxVux6yNbMPQxcwnTymkweUo73GpD5U1X9O/V7KwDovNGcPPeJilrlFlayNvyAWpol4Aqb1snBnqhRABOH2zgtCDy+5uksr03AxcLznYLp5DG2TDTSyr2UTpcIgazUCLuv7/8CmvT+kJXuw7Pb8B9Ce3qPO3vFVslZT6WVR8s6vey51RJsNz886hGTircxhEXhwEF/zmciG9WPEbeK3cPShgQPE4ktJJnqAz9c411Ppd7AFMa8vZ7oQ7kc29bOBm4nFd5BM/D2iOdYjFlAp/gtJqfsSRQHjj8lGhN27iMXvntcoRcBcglD/nk9pxpXrQHZEbiXcgt+UpuwmNbga92CaoQdSvO0lF8TLCQf7ei9DLKOFiHW2kuKaSgOr6O457KXHQizuJFK8B/i0d3/9wIZ4lOF53dfTQCLFe5EMEUKFMdYMJGhL7wZAY6ANk3uWv5a8Ve8rznfHo5xVdpK+Xmnl2N3jJFIopGFLEEksOAuxpIILs/uN1SN/DRlOThFuniq+U5mR7zG65zUVQ8siClAzovy065IN2vvHIzY0KVgCSmnUpImLixE0cGvGiVxlMdCwWrdO7hkLuZnKuVd6/kpXmEY04ZfQ3XNbrfPXRF/vAg23aNQSJa0/v6VEeVvi/n/tnXmYXFWZ/z/nLlVdVb13pbMvlY0lIQlhLVEoEFABAUVFAbcRdBSxZsafuzMuOKPjjKPt+oijMiKOiBsIuOBoQYAWkrAlBJKQVGdfunpfarnL+f1x6nZXV1d3upNAkuZ+nidPuqrucm7Vvec95z3v+33Rup6ffsneoDO4YmHm0ZAoG1vYIgBCde5eg2wMdjWstqUwjM7IAgaq1NLMmTt+SpXVYzvC1A1piZxeTV9oBm1NcXJmLXWDu5jd/QwFI0xb4zms3n03QXtgTIG80ut30USmerHtCs0K2Nl/aMru+AHx5AnVwZ54BiGdqkFF8lS8WStgozqcNaiRWYxR6glD/AW13vBlRiemHS7egzJWe7cV2yRQRupWlDEYK8xzOyN178sZRI32zyGWaJtQC9OpEMNCehI12ygAdUjXpHObyiSWUkUB1c6GYK1ECAsIeLdQzlErlwezWL9Oi5OSV15Q+fytLRHg+ygDdBB4F/HkukqbpjJEUBE0uyr5no8a6dSZqIihPxNLbCw5907hWvVn7/ipFrT7Rvjgc3oNjy/8Oxi+n/6YiDIyHKW1RQBflXCjIwKBtzwHQwAAN61JREFUzvBcA6HREVm4rjM8b2Gs47EZkUIHmmMRtnrQvAmS0JGRaSmtf//JqAV/byDhAt8knvxHWlseAs6v8KVIMca6hTxE5JhE67e0wHlBN/dM2X7lB/RyG7xkRRs1YPoU8WTLWMcfl9aWauA0lPFvAV4ni0EOEkF3aA712d0jfwOjlg2zr8J0sgwYDczpeYqQ1UumehHtNUtHnaImt59l+x5wgnbfYFdobui5WVcYSqhuNNW5g6zc82skYkSls0qI4pioL9h835Pz3nHziHWlE4gTcQ1hNpMbpRuoafoFDM8IKj0sFqo+8DlMTBp6oljF81fCYXik7D2oH2V8V954xsA7JigZjrYJtvHTqPUKTwnWRBmEd2PlbsfOB5SkhAuuC91toJn9NMROxQzpfRbfr9K51JVKvbopiIGKoKp8/nhyAEa4V8YkEWUA5cp7aUinoihdmVvw/AXp1I3EEj8GTkbKPaf2rm+oske7gaucPmnYOcfWgxpC9KFmQOVcj1q7ErosiGkD23DQZcPgzvig2SBq8gfxRryyxF2BdBD9+y9g9Cj8GUCjteW7KHdiJcZbxNZ21yzP2mZYTu99IRiye/WSHaTAfVfQzd1XvlOFAyrDpAou6RQ1DYH/oLWlAfjCpEfH8WQ/nsRLa4s3kyqeX1KbO4AjAhjFSB5X6Lw47QIGA8MBCW3RkfmYHlVWL819m3GFztq573CX739gwcZZbzRdzfwqauY+Kqohb9YghYZ
wHRxhoEsbBx1HBLCMKiLWiMmqEFCozR+87UQ1BnBiGoQ2VJjkZJLI5AS29zrhEVroZcc4VLRIOQVUNE/lEBo1kveMhXf+w0mOKyVQPG/FkIcxWMVIH3+g2LYUvbtOB/kMuCPvFdeqoWPL64gnfyg3p37pwiWGQCBgVz8FJpNZfaxIp65Fyjuw8yaaBnrAM8rfKbQ9alF93pdDMjuzMbtzrNG2OLftR9t1ac0DbAEfZyufJZ48ULLZpQw7pgWAiy41KUV1ob1oDNTHFSKAKp23EVXnw+uEJ4UA2ZTbteWJ6e+O9oTmGCv2/Das4Xr+/5tR7tXyAkqV0FC5E+VtMFEDjCxKPuVw2UnZ4EcKjQ2z3kjjQBqJxv66ZeTNscZawwTsAVbt/iW6q5Idp/VvNZ+efc2PpWZck4jy7lSGpVR4Xiw9xHMzLmNRZg2adNjRePaIWcfy3b9ym7K7vedWAr8jnnzgCK75mHPiLSrHEjmUiNWhKpqVMpGO3HPRjPWdjGUkKuG9H0C5O8aLjMiO89lk8dY4PkAssWkS+/2R4RKI3nX+nFhiH2d9YBPSHUMBVvwXWx4w6gL8sCPHrZ15DmzvZfvadq5IXnnBriO4jpcH6d5G13aTji3Q/jy0b9boaoPM5pDZufWH03O7myRCE9nuirsLQJfWQqEyfZtQHfV6Wls+Q2uLFwL6SMnmAJgUNCEL0tKCxd9+3IG0LNkgCzRKNYM93GgwEbZ6Tku8+M3uVXt+PU/D/QDqvsmjiko1T/Q4DAtKlqMzgciiQ/A/qEGJBLVm09Z0Dr2hWbRFz2NHND4hYwBQl92D5lolwQCdhKyeC1BCkySibGGMAVRPeA5bml9LRySG6Qxi2oNors2iA3/FMwYlHcOKI7je44ITbw3BI53ah4oCmeyo/WiSY2Qt2e5imyYycutFGYr6kve8h3+s9YPxcIrH7CaWGN+tlE6dD/wnajb05WJ7v0xJRqWUsLmbLf+3W+7+UPTZC4UYqz3CQdPz6MGvceaN/zLJNh870ilBttOiZ49eyV5LwBWG3DD9Mlbuv3fc36LCh15BlzjDgQxvK3vSXAm3SfR3ajghCYX+QHN3T2jGjNk9G5EIeoPTZH1+v1cjwO0NTPtadSHz8bFyCQ7RpqLPb4S7dV1x0xUMRwj9CuXHr1RroRRv8DHWzLkAzCaerCQdc2haWzTUjOXN/YGmaS80X7ysPzRjrHONO3uvy+5h+d7fIYs5gALJE/Pf2W0ZkQsSUZ4FSGVYgvo+akqPNadzLQs7WouzN0HWqEZ3bQKusuVlJ32YePKCw7re44QTb4YwzC4mN0s42uxAqYb+AuUe2Y1a35iIMcijbuKesvcFalG4ddQeh0ZDdT4fGHerdKoRuAuVyTkHpTd0O2XGAGBpPUtX1fafC8U69xWROtINYWc/y7r/fu1htPvYEEtIHKu8hsMQAhWbf3L7/427fjFGL1SF6mQXF/3oW1ELvZT80zT4gI7zSwHXa3D903PfmtrWfKF8ePGH3TWLb5bPzLuWgUDjxagclZOenH/d5Tmj7nClvj23UCkrUQENnjtQRy30v49hefhKWAwPaMa6M3Io3aXDI550iSe/RTx54br5N/yoPzTDC/PxzmejjO7TqEpqq1ACl+0Mh4dbQE9P1Sz21K9C4CKFxpZpF7qWEbkTlXQIQCLKVpQRvBXlrrI017bnd64tceVJquy+sYyBhdIKO6E5EdcQPD7CsDqpwdGRXJgI3o22jVjiIPA+0qmrUcloEx3V51BrB5Uij75NLPEp0qkck7smCfyBWOLBQ2w3p3jcbPH8nnTCqLYLIB45EB5Dxr9kQ00ipUDTzkbJa58Y5PtehxDPImmq9LEAUeUMHJRl6zoT/JENhhf4q1GdVPlgQaAW15cCX3I144vAlQgRQt1nf64+450pIJXKKE2cLdMv6lmx5zcNJbOEsUbHEwmR1uVwaVfvGLXAfWLse+9FlFvJRcme9KDk5T/GsBsrh3pGxg/NmTh3ooziAtQ1PQq8vhhwUMozqQx/QrnvFqBcaw8hxLa2pnObdzSerRbuhfgu8JHyqLVElL3A51IZPg/Mr8kfCGnSfYaS322M394BLiaefOpIL/RYc+LOEGKJv6EqPy1HLWKVU+p7PRJKj1FAyQWoGz6dupp0aj/KJTDWQ1l+05ZGHZWP9iTDPuePMrn2a8B1pFOXHGK7bahF+QgTqNomcA7twJKu0gUW+u8n3NrjgTNv3It0VlFes7iEnmDz7wfM+lHazxPAZbhi3o9QSUqVTKuOci39LrG1ZTUqOuxjqFH60Ai7qP1zb3d4Lh3hBeWj5XJ6qfyL9TA8qpdAp0SrNMtuHOcaZ6Ayz2cTT36dePJHRUG5U1AFmAZQz8da1CDpiElEOYCSvrgcWJ2I8poKxsDbti8RpS0RJZWI8kBxu3OAdVJoexGihQrGoOwYMhGl7fS5s58XuJ93EUPPqY3hwqgvV+PolN095py4awilKAmHZ1HGAdTNngISxdeHs87QxbAshoVSBP0oqhO1gJtQD4BXQrAcB+X6uRpVve1a1DQ8wvDMrMBwXWWJmvq+gVjCIZ06BZU1XT+JNjvAdmKJ0QHYpaRTMZS202LUjezVWx5i6LYYzEDf3koRMAAugdpdCAF64NOsvO5nk2jrsae1JYmSE9FQo/hRA6Q9tctvP1C99OrT9/66vvT9CdxQEvgq8eQni+dagKpRMQsVfVPpEBuYsfIMnMJHcKyzMYK/QzPu9DSvUhkCwI01uQPLVu2++026dKKotQiDktmAqDxrkKg1rruAP6DKZxYKIthkyEKg0u87xjXaQNBTKB1Fa8sclB9+C/HkkQoyHhe4rd+8WyCH6q2M8f1+kXjycy9vy44+J7LLaBhVY+A21EKpN72Lo/z8Cw7jiFlUTkIAJQ/xELFEmnTqJyjf+4soTaQAY0/ZnwUSxc49gDIGLsPfuSy2dS/K93kAaCteSww1Spxo8p2HxkRKZMYSaTztpHTqLNQIKoaKlwcYVqUON8FAO7gVl2veyBnvPTHD7FpbFgKfY3jUXjFkpWmwLVSbO3A4ocACJcUOQGpJsgo4vya774zVu38hvA3KWEZm63qc/HKQAs28hro5zajsdBJRCsB3af15DfAehu91KdX95dUlrnRPCtRM5VPFv/cBSwxpax3hBXbT4A6jPAN4jBt7x5jGACCefDk0pl5WNGT5QnH519KPkso54ZkaBkFxPiN9tEEqFSefGJuKgnNQqnukpLQbgc+g/KhexbFKDABXk07di5L8vZyRo33vppqFqtnwHeDzpFP/gjIGBmMPRL0M0UrnnlwHHUusBdYWZ1lXM5w1XWzlULKULPm3E3gt8WR6Uuc6xhSlIy4E6lcHp3fW5g9I1IgXVIcqKH6nXnqY7hbeGihRwIRJTTeraW3Z6CAGTg3HltUW2iOOGH7kKnS4GvbgaUOvHEtnIPMhigahhJ9QnNEV2ylKff7eWL8kzQ2AnF59acjp/19U9M4ytY0j67O7bS8CxxYBukOziQ5uHysZTaO1xSxWjHulcICRtV
Mk6jeYgVrY/ibxZNsxaNdRZyoZhEcYXVd5Is9uL8pdULrtTytumU79MypWeyKRRK8u/mtDlee8DlU4pRxRPP8/oUaUr5pAuwXqtyvvU1RseDolhqS1J4qamaxCRVksQ0Wg1AE6Ztih0KcjXS9d91snmjEo8i3gRsB8au5bB8/bftuLhlvwBg07UIuR9SgjEZAIYbiFoe/3MPyOK0BJS08b3H4YzXXByVdyu1S6j4p7aEiEEEhZTDgbIuj0nyYRjwjk0CxVgGZISy/oEZASTVoY0iJn1BG0eynW8dTEcK3sKOo7aj+MCzpRuQJVz8OrIPgkcNNUNIpTYw0BIJ0yUJ17JVnr8diPCrv8fyiXy8MoH+sHUNK1/4S6Ca5C1Sw4HFmLa1GzirESV0pH35OR5RhLl+bjxBL/MakWlpNORVChqAuwslE6tr4B5KmoGciXTrSHIZWhkTIV23mZv8lY1+OgRtgjjGulp+JwFqIO9XQd+pjiP4h/ZGRJzdaWjRRH+KXHd4WBkA4gcDQTw60YOerpHHn3mybBtbSgQEphygKOMNGkPWrdqNhWF6VD9XmUa7UJ6CzWgZi6tLaEAC9g44/Ek+OF5Z6wTCWD8GnU6HaykVP9qJu81Idc2kHniscca/F4ItyMKpaSYOxwvhxq5jFZNdVK/JlY4lDRRq8oUhnOYziCi1Chk7N33DFim9Ifd6ynYjI3wESerEMcrwclc/3nEe+2ttyMWi+rGj9VXlQKBpBCuRs9l2ShOzjj4bDdc7GUEl06wtVMTGe0YnNJWyUqIMJAPRt9wI3Ek3eP2snnhGIquYySKJ/+ZA1ChMoLcKLk8yOxmhZK+34eyvCMNar/KUr35nDXPUp57Cgc48SltcVE1TZ+q4PW+ezsN7UTnjOX4ixASIdle0bptx0OedR3PQ+1ptSL8it7v++Y/f0EDctDlOZ1tLasRo3Om4F7UKGep9gYhoFdYZ2j4m2r3IpqIPRF4IdPz7vWQsr7THvw3DN3/SyoTaximzewkSiX53dpbfntiTZz9BnJVJohbEOJck1a8OslJIsqND8b5coyUTOBIKONwn+jwhIXc2TXsBeYO6q62iuJ1pZvAjfLomF3EWyYdZV0NFMsan8E080StrpH7VYpTrP4/5CcdNk2fcSTQzPL/vV33BopdH7W204CWVFNf2QG0f7tlEbxTMAguChZ8DtpbZmJKhZzdvH97uIhbsnpkXdprnWJIQvaWMcc67oA9tQu31ibb/9YZ2T+Q21N8Rtqs3tvi2Uew3SymHaWgMyWa6mUrlmXkkUZmSqUNtZNxJPdh75Mn+OJqTRDuBk1ajrajKuTMs4+EhXr/wVGZqmONYu5sWTfw0GiZC/ueYUbg4+j7oWh71hDsnLvbw/X3Qdj//5d3h+pDK8Pzrzy5nN33A4M/4gh2c+AY+FoJpprF+sdVLypvLe8e2cTqgYxwI9RxkCgXIp1KDfNSVXOwKj6wWUNH/f+nd27cbmDcU9t/sAjM3s2XtG68KYrnpn7lsso9g2N/WmWtKfQ3QISbYfpZmcLpEZJZLJ3qQyv312Dmj1NSOLc5/jhxM1ULieW+AOqqMbRxHs4eyaxPahs4LcTS3wLNeUvHfFPJIJoMkahBzUy60YtkH9tEvtOLVRS1Kc4hMZVpR9gvB9lnM92wlA46yfygTohK2wdze7AxhgyBh6y4p/0Af8IvKpkofassmZ4A7koMFeUfCZG/nMZlpEYs+SXFEIHVgadwcsYrv0NQGd1jMdj7+Wxhe+3WhfdNP/xBe8xMuGYZmOULrSVD0AEKrfF5wRj6hgExe2ojvFo+cEKqHCzjwATybr0Hsy7iCW8Bbb3oMowPoMawQc4dKHwQ9brA7YAS4gl6lHJcm8AVhNLbJvAvlOVMOo36OEQ33FZx3lYuGhWKsN9SJmpye0/e+mBP9fk9UjF81a5lTXyih3qUF548a3XE0/20dqi0dryGipLpNeiAhV0SoTzyvCCIXaiitlUlFoXUuZLto+i6n+XrgXkEMIEyJu1vDDz9bTXLAEEroqB0Co8cGsAUhnOTGX4wcMH7Zaup391Na0tZxSryPkch0ydNQSPdOpc1GLZ+YyO6BkKtTvEUSRq1P08qkMHJTcwEfVGFxW2+mtUMto04A7gB6g1jo+i6hZ7shjlDKIUJz+KeuDLg18kKrdhWbE2hI+Hkky+G1Udr0qqgIBRjNcbTSZMtD0cW6dLa2Vt7oChSVu4Qo2a9Qohm5XOY4sqhHClK4w9QWewBvXbV6Ey4c9BDXCuYliArhyvIz+UCKKDWp/aRahpjnStf3Ot/FlSyoCQFhpyQKh7/fWpJcl/QCVGDqKy3r0w61HPzKL2h5jRuwmjJItdDJ+vNrUkORNYrzmFqrN23hmosvsEyLxQARQ3nWj1hl8JTD2D4JFOvQF4Jypyp56iFC5qkbcDJWnxf6gcgWaGfa1txe1dlE/UWwDOMnY1tVIGUS4ijeEQUgvlzqkFnkJJZZ+PivP33EkualR10dAaQDq1CDXyX4Hq5KpRGk2fJJY4/gvQHAtaW2agNP1XTtQgyEmsE5Vu5IIrQLMxMbCQQscSQXRpSUNavwSu8Ra2JUIKNZAHwMFAarrsD0SdTPWiNUsyD69kuB3/iMqQfxR1f1SKhPP4A2qGuOgQTbdQ2lrXEk/uB7x6z2eh7qvHiCdzqQxfQMk4D5Sce1Ruj+4WWL7nHmrzB0G6aMMTMimKGk6pDN8Hbjpp/4PM6NtU2n5bwMnEk6/k2exxydQ1CKWkU9WARSwxOpkknVqJeqiiKGPxF1RWcZbR1aPyDMtFeKqR5QvzNsoQVHqAvQfeRhmeG1ChikFgA7HElBADO2aoDq6Hop7TOMllQwFEEnK95jSr1srUVdrDhbxWFI8bK0+hdDXYEQGkptvPzbw8YdoDX5/Z89xCwy28+OK018yoH9w9vybfTnv1YrKBBkeXVra3akZBCv0jia0tW1Cd8wbiyTW0tixFSZ7ojM6kL6UFJaOwfrwnWSJcVxj0VU1PPzPnmpVjqYWmMsSAv6JmBl6ZzlH6WLO6nyHW0YqLrqKnpMv+2mVSatp35p/86ltobYk8Me+G7TmztnnFnt9Ql9s3Yv9d9ad/YF73U63AZuLJY1nXxKeEV4ZBmAzp1JWoqXoVo0dGOeC7KJ/swyiZiW+XfG6jDMl4DzAMywAcBE4aV2YinQriSQW8kqOHxiOdqgf+l4HMhfTtqajpU8Srcf01VNTOlbLoChlnipAXsB71Ww9R6QeztSB5o4ZdDavvOlB7ShzlFnSL/2u12X3U5fYyaNbLjshCgRD3oVyLt4+SY1bGrQU1aPDup/KBhgtcbmnBdxpu/rrKzS+2TZggNAYDDc5Tc6/9aiJaUTIegFSG6cC5qJnsfajcihHM7VzHvK61uMJESBeBw2ML3y+l0FMLM498b1b3s99DiCZLD3Egsph53U8OudEKWpVrujmpqe++BziDePJwtD18jjK+QahEOnUtSo/+4uI73kOYBs4tFsbxOqLdjHRNjJV4VornvjKA6KiZgZLhuA7l970GN
ULbD1xBLLGBdGopykXwFLHE/slf4BQjnfoR8G66dwlynQIqdthDuQSU9P8TuPvtYmbviMFBpf06wvPZ2nyRlTdrd6IkHbKoe2F6w8AOTt3/+6FT72w4U+5qPLMuEaVvzDMro7AcNStNoxR8f4oyMI6Eu3fXrZo5o+/5hO4Wxl238KKf9tWcIrfOuOT+RHRi1cxSGf4L5ULyvrttwPwqq9dYtftuNNdW0qm1y+T2aa8GeGDF7t9cWpvbZzjCFLq0KOghLK0KQxboD0SJDmxHw5UlIbFriSf9qKTjgKmUh3D0iCXuAu4infog8K+okdkdwKeIJUofYJfRSWQTidwyUB3GbysYgzOAf0EJ3dWVHG8e8BTp1PMoHRsVDptOXUAssYFXNqtxLEG+d7xOfmhkLScRWCSgT5bJkI+1c312r1yx9x6xdt71DQitA+WGBGBG7yaElDiaiZAOc7uf7F609MyxjQFQXHQt/W3vBxpobZkPGA8tvuV1i9rXfEtIF1sLYro57/qG2qimlPpQyOusvk3C0UP7ib56Yl8AfBzl0rwcFXF3A7A4Z9b++sm51zY0DO4UBT2id4XnacATQESXBU2iCQTorkWVbRMQWWwtwIvTznebB14sP8f0iTbG56XFNwjjEUt8D/hexc/SKRNVqW2y2kNZhhPVnik75jdQonpVVJ5p6AwXARJAA0rT5nUV2jcT9SDvmLTy6YnH/eR7VqiuXkcJdI5mol9CQQ8jkTLoZLMuytdSmmVc7HBzUn2/AsBFxxEGQatXX5h59AVNOk9sm/aaD0qhBwHyRkQtKUuJLq2C7jqHb8TjyR0AZLiwszomZ/Q9jyYdbBGgKzzX3TTjDdJwrb5IIVOH64pV+35T3FGtdFTnD7y7uFbwxkS0ciiqRyKKjUr0u7nk7UwqQ6xgVC89UHvqQZTrk0QUuWn7C2vyekSrJoNXZckWAYQAwy3QNLDDsbXg46abP6/k67zzsL8Ln6OKbxAOh3RqBaojPp/JGwQLFcFRhcqZ8I55M0qPyePQMw0pYeBggnX/vQs79w2k83XiSZd06lOoLGmAe0mn3jvFF6z/Gdc5B7gI5QOdVH5NcUHYlQinOzR74NnZb95Sm9v3mdW7737doFl/4d7a5UsXdaypKVuL2CSUJEkQqNFwLNPN6wLXmN399FmuFnhVwBlk04w3gBDsbDxL1ub2y9rc/pyG3AV8qGJjWluEpQVvEdK9Rpf2swL5z+NIQDzRFZ73uk0z3hBpGkhrObPW2Ve3/D/R9Jit6Zf0GHP6dCfvVYPDM4l5o8YALgIeREm0j0sqw9uBW1ABEJ9ORHkqESVPcfaSW/ujuqxZ99X8tsxFp7i5RYBw0egPTqc6344ubbSikV7U8Yi5edpFLy4/8PuHUIvoD6BkyX2OA/w1hMmSTt0I3Mbh5TOtA+ajjMFa4GpiiT7SqVpUnePJaRj17oVsJyUFbD7J9NPuAp5GdVqeS+s6Yok/HUZ7TxxaWwKo2tZXA+JQd3WpW0WoYIFeVJjyg6Xx8akMZ8zs2fDQ4vaHI5ocyhd0hYq1N0uPV4qNidAEa+ddT96sVZtIOTin++llizMP7xqr6tiBjX/498aBto8hJQLpaNL5oxa/5YpK26YymMCXUO6c3UAyEWVzKkMINbhYeuq+B/qb+7e+Dwh7SSyPLbjJtc2wALKJaOXQ3FSGBSj9pBUMG9gsKmR7WSKKyqJubdEGAo1tAatvjiGtEc9EQQ+zefrFLN/7O0DiaEGEdNlXt1xum3b+9Yno1KgyNpXwZwiT52tM3hi4wLXEEr8knapBRQ3tKYkaup7D+S0KfSg3iQbSFRihz6LqMnvn9PqAig/9lCKeLNDacj1qsd48lP7HgNnohOxedOl0gfSqz/WVJ0sloqzv2LP9Tk3a7y++5QrVQY47C9GxcQjgaCU2XojfLV5y+g6WnD5i21SGG1EumcxSYcSFdHE1U0rp6gJ5/ljnSESxUDPBT5S9nwW+AsDWrR9GhUv32BhRASa4XttDqQwfSkT57ojvZv1Ppy0KzXku4GTD+2pPpTsy3/sohLp35wLPAXSG5i6sze2fK1Uh+hHPhaVV0RmJkTXrCdgDuOhoQmJpQVCuWN8gHGdMNemKl4PDqVfQRizxSwBiiT5iiV1lIaRhhrNOJ45Rpf6XxfFuIBxGhQg+hBq9BoBdlEooT2XiySyqmpWEod4pz2jbIKutzt8Z0u4RSJ3hNZsdlQ7bNNj2LYalS7QSDZ/RBy75e3vTq7D1ECgRvM+iZiAjeHZf5s3zc+mWebm2pQE3f3Zv1cywFBpCOghcckbtwYld/Jj8FmUk6w1sU8fmnJ13UFXoBvUVfSeV4W1DW7e2iKpC12Nzep4JN/dvYcXee2jsHxERGgbCRf0mCkak3RWGREpKdZwkOi/MuBSALdNfi6uZ6NJiIBBlb/0qOHR2tc8xwHcZTZZ06l9h7BjuCkhUwZpLxznmAtSIa7xqbDYjZxES1xb07AY7C4FqqJ0NQrsAlZF6Gaqj+z2xxNSuZlVKa0sYVTPgLFRi1y1SxfO/CzUAkkLlIry3+PrzqM7+E8STfyg9VNEl80Gku/JV2267zpT5qnHO7ACaiy5sPSg3T7/kYGdkwQ7g1kSUoeILL2zfNDdg959maVWPnKR3znRhgyi6nnIE3XWRM8XsrqdEdGAbWbNe7mw88/MDwWlfTmxtMVHRTvu9WUzb5taQ7trvcYWe7Q9Gf7IstrRynkpryyqpcimGBoCdoXlsmPMm72U3sCQRJUNry1lSRQsN0R+Isn7+9aVv5YHfA29NRLHbXnjs32b0vfAJ3Slo/VVRukLz2F93KgWjemgHzbWl4eaxtCoWZdaI6X3PZ0238DzwXuLJZ8f5Xn1eRnyDcDikU38DVjNcdaqc0uTVNPBGYolNhzjmTqCRsd075XlWXl2F8gTaKmIJP/OzFBXPvxKV21GNKoF4z6F2S2X4qpDOjVLoxpltd9RErM5R21giiIaLJl0X5B5Lr7psINC0vXHVmwdLjiOA0PyOv71nVs/Gb+vSEnm9Grthcb5WywcYilQSbAydRpfRSONAmua+LfSbjdLRg88vbf9rVCiXTSvw5n6joc6QhS2mk41Iocuu8LzNB2uWLqtoFFpbTpZqwDFkEHqDM3hq3rXey27gzUBo+e7fvq4pu+MjpbtnjTqeiL2n9C1vcvSRRJTvAKTa3deDuAMhGhnpeZCoQIoccMvcrvUfm9/x+ElCujkNxxWwl3jy1EP9Fj4vD/4awuFxEfD3KF/qWuCTKG0kiRrJ/z9UJvMA0EEsMZEqUmtQSWhjUe4Wr0KNdEtlBSxUhze653olo0bUTxf/TYzWlnPPDDTeUmX1BQt62DWcytGZhixgaSEcIYWGMzPoDC4Mrrppo/d5KsN8lLbS0sbBndWaawlHGATtPszebUFqpoOu1hkEkoIIMLNnI0sPKi/fdPW7n4pyaWVQGdaPR+yuU/A6XumKxoEdJ/WGZl6KkmEpZ6tEbBLIYsiyYGfjWQA0DrRZc7vWVTla4L7tTecVuiLza5uyIz1n
B6uXlCdye39fDMogILR/B2qFdES0fxumk6MjEiNv1jgMl6ndozv5RYBp60FDd62cIa1ZtLYYxJMTUfj1eYnxDcLhEEsMAv819DqdegClwz8P+F9iiXsP46gfAs5ECZWNtWhd/n4NylXhjcLWwjiZrz4To7XlPcD3IoXOKoCQ3TPmupEUmpchXEyCoB8gleFiVOnUi1CJV5ru5AVIhHTRpI2W64R8N4SbINeDdCXR+gDT+zZXOpVAzR4NVJa6NvJDRwg5hrJJPOlorS1nZsIL7rT00OX76pbZvaHZf6wb3F29bN8DCXA1iV51Wv7e8AvNF+MIE02qMYxEZ1/9aWPNgp9LZfguyj05FynFyfsfFI2DbUhgXtdanprzNiNv1kRQqsHv74wsCM3u2YDuWkIKUSUlvxe+MThu8F1GxxPp1CWo0Ml6Jr/g7/2QB1HyGm1Hr2FTnHRKoNxJb8XO7yLzwvuBwCGeDJk16wb215wamte9XkdK2ROava1psO2k1JLkGagYf68DR0iH1TvvorrQPnQAWwQRQharkZVmF2ul6qFD5xMqlNhGhdWOWG8qaKHeffUr6hecdO64zU5lCKBmkcmZPRtuXJh5ZJpEQ6KZGg5Pznkbzf1bmNP9FKCxLXoe++uWA0jNtfMhqztYMCKupYfWo9xQNxSvURhOlnPTP8YRJgiB5hRwNFOasiA1abc+Gnv/ObYRMuoHdxPt30reqGVv3fL7HT14dTEBzucY488QjidiiQdJp16Pkrr+EpOLxPD6k+komY3XHOXWTWWuQhWQ0Rhor6KoaFs2LPaGTkPpCwF7MJIN1FuPz39Xd9AeuKNghD/9qhnVLhmupayGQcPgLoJ2nyePjYaLi0AvjupLz1XBGGSFMgQ/Bh4BvifAlMXFaAl5w82vHMsYpDKEkO73Q1bPGwNaIFgwIgag9wWnZ0GYQkoEFgKX0/f8kv5AlHVzr6NgViOFmhxF8u1yxZ7f6qaTswRywBX6rWsW3/wTStbRXGEghVI/la5Ax8JwLVH8/LzT9t7LU/OupTs8h+7wHK9556NqPzw67i/k87LgG4TjjVhiLenUyRxZWN6yo9WcVwTZrreQ769G0wpkuzx582GNbCgICJQv4gjpiCqr17SqlwQsI1JVIgMxA0BzLRa1r6Eut4+8HkEArtBxhEBzc5gyd8iElq6q2d+vz+3bJnDXEE/+DYDWlu3AJULV09gk4B7it+wvFgi6onj+PxFPtqUyzBDSeeTUfQ8sasjuRiJoazyHPQ2n01/VHHlh+iXM73yCqkKPMkRSUpM/wKKOR9g083IAAnY/q3b9UjNkwYvSCuvS/jrKIA1dgquZbGm+kKUH/4omrZJrK0pmFDLEMo+SM+voCzSxoHMtEmr6QjNfS/Qs3yAcB/gG4fhkzGSkCeJHGU2E1pargBtRRYh0OVzrYgRuXcwEXL0nPcKNp+Eyv+NxUZfdE94087LmksTlx4A3xzKPVjX3bxESQdDqxdUMhHTRpYVE4KBhHKIyazbQcNNzs68cdLTAY2T4+0SUNPHkelQY6Qgk4usS8V4ARzP1p/dkdhCMhpv6t8+rz+5FSokuCyzOrCFr1tFZvZCBYJSgPYAhh9VSHWFQnW8naPWRN2uI9m+jJEsb1PcUAg5QlMUIWn3U5A+QDTTQGvs7dNfi9F13EbJ78cyokA6zu59Gk84IZdamwfTnaH3sbuLJ58f9MnxecnyDcHzyJKpTr9hBTYDao9iWqUlry/tR2bLjrtWoDDcNRzNF+cqyCzhagLrcPu2U/X94keYhRekfA5fU5g9eCQjlRpFkjVp2Np3DgsxjhAo9uLqJ62TRcJEIp1hRbUR7ZvVu1Oqye6qfnPeO81zN/BlKAnsU7Rvur2oU+gdtYRgIXejSomkgffJAMIoubQEuhvSC3SSn7P8jjy94D439aQwn510pmnTQpIPh5jm77XYy1UvImrWlbiwByLwevg34RNDq49R991ObP4BbrLGcDTaSM+vYMu1CYp2tRPKdaKiyorocvVRQzPz+OCo3xOcY4mcqH5/8ACU9UKlmsje0Kg3nK6eKdKrxJWrbVOGfKAnlHWsl1kVH794uAp0vCBhZcU0g0HDzhlvomzawfaikaSJKNhHlTQU99FeQUnMtCdAdnkdnZAEHa08CTaC5Fqj1gXs15MfbGs99S06vHtWUsNVNXXaPCyz1MoTLeX76pSfZWsDw3D4Sga2ppZCOSEwt9JZgyAKrd/0vjQNpNJxRB1WWyaW5fzMzejaNyEIG7L7g9GurCl3hpQf/Qk1eLZRrSAxsavIHaRxo45SDf2bjzCuRmj4RrZfy6oQ+xwB/hnA8EkvYwOeAz5FO1aGez4+h1DXvAk5DFS0ZRNXf/TYq5NVDALtIp2YRS/S8jC0/kejy/hgvLEcbkoQa3tYrtCOQBc3N9aNCfX8LeCJ7DUD75oXv//LsrqdfVZfbH+gOzWJX45kCYHf96ZpEpyG7M6e5zt2bp7/236XQ72gcaFspBEIiRrhUBBLdLYja7L6nVu/+hUY0OcrP5GpmfkvzRYMnHfhzRHcLFIwQzb0vCNMeYGfTuWyYdSWn7757xAg9ZPcRtAfLD1U859D1ukF3QEo1Yw0KJd1hNg2mT23akSavhStWm1PyGy7VhXYMd3xVluK+Px13I5+XBT/sdCqQTr0PNasofy5vJZb4l2PQouOf1pZTUH740OE+AdII0x6a+8PpfZv/mXhyH60tcZTBrgee3tlwxju3R1/9VyBW3MXrjbtQRmQ6gOFk3dU774oE7AFNxx7Vwea1iGPKnK1JJyuUTtVbiSdHJDsWZw7/obmFDyw58Jfq5v6tQ26eztBcNsx5M9N7NnHywQdHXIODjl5hHaPk/FmUHlYd0FxeXMgVOq6UGGUTVRcdRzNZP+8dA2fuuNMwZSEAQ8WJJCBKDvQF4snPj2qEz8uO7zKaGtyOKm3oM1HiyedpXr6JhoXQdBKEp018X6GD0BFOjl0NZ70vtSR5QyrDQqlk0WtRrrzTIvnMd1CRQB4GKoHwp8B8pBtekHksctaOn9ZU2T2aXrQXJaNzHDQCbla3RSBoi2BIqvoFbyhvUrEm88dcLbA6OtjWWRq62pDdQySf4UDtKRT04fQFica+2lNxS7qBvBZ2e4PT+2xhbLNEwO4LNpvr5749+0Lzaz8nqVxMR6M0IksAQuaN6q7N0y/Zkjdrf9UVnvtqVO2EjIBfCjgo1C5dwGrfGBw/+C6jqUAs4ZBOLQdeBLwA7wxK5M1nLDR9LsGi8oc5C+w8FCagAzhUkU0jpzrYrwJfyevVQpeWI5C67hZwhbEUJTFSSgjl5mNG7/PM7nm2WKS+EgKJhigaClfoQSk1IXDfRWuLDtxXOlNIRJGpDHv6AtMiDbndw80VmnLbCMHaeTewuD1FwB5gb/0KN1Oz1O6OLMhV59uN3qrpdEUW5FA5GR9guJ7GiursgW9QIRTalQKN4fYXXV39Ybvni6ctWPgNAKJXgNKSGqa1xSyf5fgce3yX0VRCZdxeifJhP0AscaTSyVObdOrHKEmF4sq
yhGwHov8A0j104mwmvIDnZl819Hpex+PM7X4aUIu6G2e9MdsbmhUaa/+F7WuY1bMBRwtgOoMj1g3KsbXAUARQCZaAD6OimuoPVi/u2DTz8neY1sDtZ+280zDcHFJo9Aen8fTsa5CaAaqTXwNsQQ0Ie1E1Ps4GXgU8i0p+e5JiSKlwbXHetu8JvWL8wmgkZDV4gXhy9YR28Dlu8GcIUwlVO/mQKp4+Q/wdSl7iCgE5hKgnHBWEGhFdbcjC2LJQNmZRumiYnY1n01/VTJXVS09oNgPBaAhAd/JECp3kjWry5rAWYXd4DjN7n0NzC7gYUuBYGm7lXAhZUc7ClPANAZ+ViMag1R8y7UHNMiM8vuDdRAe2AYJM9SIpNcNz0VybiFasj7ELJcLnyX6/CKzUXFtfvvdeJmoMYMjltWv8rXyOR3yD4PPKRRnQ4eIw6dS/AZ9CaEpwbhyDgKbJ7tCckZ4eIeiMxIZfSodgoYcVe+/FdHIgBJunv5ZM9WIAOiMxtjS/lua+F/K1uQM/CjoD/4pSZG2UZet7urQRlZPYghIaLa0qFLY6tbld62ivWYomHXmw5qSsFHoPaqSvoZ737kN9LYkoVirDZcAXp/VveWdN/mCoPPLJRVWKKp3TFBfDpYDNjKwP7nOC4LuMfHxKSaeWAb8ATqV3L3KwfdQmEsiZ9betnf/OOVJol1U6zPSeTSzKrMFw80MdqYtOQY/w+MJR+Vf9iWhRxlxFP/0MOEmq9YZD4dhaQLjC1HTXoqCHMF2VvtIXbHY3zL5KSKGXrlOnElEumsBxAeh6+lf/Vp1v/6QrhTCluha3WAHCxJYU15Ql9ID4oEA+SDzZMdHj+xxf+DMEH59SYonnSKf+HniY2lmImpnIvn0wZBg0hMANG+I7UmgLUcJsArXgGgLcgD1gLM48DFKOGFVrOGiqE/UG2KA66XW0tlwCeCHC/wzcn9Oq26rc/tL8klHk9IhuSBtN2kgBAWdwKAmtJt+uNQzuojOywNtcAIlUhosSUf4yka+jIbv767Ywb9CFmO6IgHmw5iT21K3ILm3/6x312T3ri9dsCLiD+EdGW0+fEwrfIPj4lBNLrCGd+iZwC0IgamY8La3BBdjZegARarKonXUDqk5yK3AeKpz0q4BpOtmPggzpcrSk1MHqJTngTyhDcMH8jse7F3T+bQXwx5LNfr2n9rQvukKfPbNnI144atFtM5SmYIkghmuxr24ZeaOGvFHNSQeHlwfGmPsLVEGnCRkE4sl2o7VlJZKLXLSetqZzN1p6qLN+1Vt8vawpiG8QfHwqEUskKfGDC8QWrKxACAszFARmJKIUUhmuRqnLDiSibEtleONgoCGb16urIm7niDUGR5hdexpO/yRweyJKgdaWM1DGobQUqivBNNz857dNe43ekN1FpNCJilxSPnoASwSQQhcCl8FAE/vrloGUNA7upLl/CxLorZpJV3hupas7K5XBTESZWNhnPNkF/EpDWT6fqYtvEHx8JoIQPyAQ/iTDyWX/A5CI4qBCNSm+/l0qo//j9uh5H1q27/4zQdoC6WiAIa3Lz51Z11py1JOLxxuhqg0waNbrBaOa9fPeQU1uP/MzT9CQ2+UK6JMQMmUhYCHIG9VkIguLu0p0J4+ULkJKNKfAgo5WDtScgiZt5nc8TqTQhaMZdXmz5n62pi/3cwF8SvEXlX18JoLK8bgMWAqsIZZYd8h9Wls+j9KcAvgK8eRXSj57A0r/yJN0cFHGwO0LNg8+M+eaGkcbjkCd27lux8KOR6uF2i7sQrg/EBVVdj8Iwa66VVQXOoj2b8PGxEQtADvCxNUMpJSYbgFRDB91hWHp0v4i8eSXjvCb8ZlC+AbBx+elpLVFSZHHk70l7wWAfagEQiRK0O7Judf2DgSjf5BCX4dajxiiJnegdfWuny8V0IgqoTmEy8gY1VItJEsPIaSjZgzSGTIIEpHXkCniydcfzcv1ObHxtYx8fF4KWluCtLZcjtIeygGkMkMu2nqKIaXSS5IWGnmjJiuFfjEqY3gEfVXTzz1YvXQzjPb7lz/EpbWZkRIhpctImXQpkDlUIR8fnyH8NQQfn6NBOnUtrnMpg5m99O9/APgCsFoCA4GmgfXtTi1CD6cypMXiD7/9/Be//Tiq7rUOsKduRb9lhD3toKsrnEEcqD1l+fT+LTqUFHIYAxcNRwugS7tXl/ZXgCeALwLzURnL96Bqbvj4DOG7jHx8jpR06lak+xk6twvsHEingHLr5B1hVkmhGRtmXSl7Q7NAjfAHdCdfN7vrKS1k9XKwdonbFYkNoPr5CKNlzBWum0ts+1YAVZNgTCTgCpPu8Jxt0RVXLj6q1+ozpfFnCD4+R86HyfeBrTKE5XDxeVOXFg4m7nC2cAAIOHqQndFzvf01lCqqQakxkBJdWirRTAgQbAMWFrcdswiZQKmOSjS/6IzPpPANgo/PkZBOzUVpBQkYcuOM6KwtrYr+4CErRIrS/UKFLpbtu58qu4+cUUNHeD7N/Vs7gQXlxy9HotFdNevRgWD0C5Oo8uDj4xsEH58j5B7AIFiDNENgZYfqD9siiMChI7JgZH2wyoyQTl3c/hBVVi9COkQKHUQKHVKoNYdDIQXu7U3ZnTc1rXqT7w/2mRS+QfDxOTKWAyA0aFgI1iDPBU+yF3Q8YYTsHvqC09nRdC5IyYy+56nN7kNzLWwtQEfNYrrC89CkMzjT2hc2pE27MY1BPULQ7keTVknhmZGzglK1upIPXOBtxJO/esmv2mdK4i8q+/gcCelUBmjyXkpgrzHTkmB2G/VkjGYQgrmd65jXtQ7dLQwrhmomz824jHmii5p8Br1vF7gWbXWrEU7eWtC11vSOK4aDikoNg4MyAnlU5vRniSe7X/Jr9pmy+DMEH58j442oCmMagAB3lrXXJNfNTMdmZ/UydtScWixWM1Rislh52GXa4HZqgxpa9zaEkwcE8zsf54WmxF5bC87R3YLUlKCdAfQBPcV/twM/IZ70FUZ9jhq+QfDxORJiiVbSqShwDfBtIEhXGgp9aMD8gQN0mo1kzXoi+U5PsRSJQAqNQbMBlx50Jw9oaq1BSrms/cHvoYTv/hOoQ9U5voN40p/S+7xk+C4jH5+jQTqlAVkKgwHZuXXER7vrVrKz4SyWH/g9kXwGirWRC3pIbph1ZS6gG6HT9vwGYWeRCFdIp0fAW4gnJyZR7eNzlPClK3x8jgaxhAv8Abcgy+OJpNCY7+whHK5F04xiOUxJ0BkUZ+76uZZ1tc+sn3FVS0EL3adJ588CPugbA59jgT9D8PE5WqRTEazBH9Kx7S0SVwOEIwyenvd2VmU3oGc2gTtCisgtRg/dQDz5s2PTaB+fYfw1BB+fo0UsMQC8ndaWuIB/cNDFs7PfVLCN8JtwCmGkp0uqdOZKQklfOEYt9vEZgW8QfHyONjNWtm2sWvbzPmleV5c7MEdK+R7HqLpKF/p1YAskUgzr07UQTz55rJvs4wO+y8jH5+ihiuh8RcKHnWxPWO/ZgSp9qbG9Kf7NxbkXHybbcQuunQf+C3iQeNId/6A+Pi8fvk
Hw8TlapFOfBW6VAAc2gnSGPiroYSd49k3+jNznuMaPMvLxORqkU83ArUOvywZaAbegk06d9jK3ysdnUvgGwcfn6PCtEa9CDSNeykhTAbj0ZWyPj8+k8aewPj5Hh1LdIaibgzTDUOiHYC1aqD4P7D1mrfPxmQD+DMHH5+jwjfI3RLgRUT8PEaoHuAv4xcvcJh+fSeEvKvv4HC3SqV8Ab63wyc+IJa5/uZvj4zNZfIPg43M0SafOAX4IzAIOAEliiT8d20b5+EwM3yD4+Pj4+AD+GoKPj4+PTxHfIPj4+Pj4AL5B8PHx8fEp4hsEHx8fHx/ANwg+Pj4+PkV8g+Dj4+PjA/gGwcfHx8eniG8QfHx8fHwA3yD4+Pj4+BTxDYKPj4+PD+AbBB8fHx+fIr5B8PHx8fEBfIPg4+Pj41PENwg+Pj4+PoBvEHx8fHx8ivgGwcfHx8cH8A2Cj4+Pj08R3yD4+Pj4+ADw/wEks36kvUXwWwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "figure(figsize=(6, 5), dpi=80)\n", + "\n", + "tsne_pos_lowfeat = np.load('tsne_pos_lowfeat.npy')\n", + "plt.scatter(tsne_pos_lowfeat[:,0], tsne_pos_lowfeat[:,1], c=color_list, s=10, alpha=0.9)\n", + "plt.axis('off')\n", + "plt.savefig('./low.pdf')" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "f1e6686d8157a72333d6c7290e12a7b16aa121841fdb309d8a83e0abefbefaae" + }, + "kernelspec": { + "display_name": "GraphSSL", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/OpenOOD/tools/report/report_gmm.ipynb b/OpenOOD/tools/report/report_gmm.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..ced812c0258413e63c869f5c74d45ac27f7cd492 --- /dev/null +++ b/OpenOOD/tools/report/report_gmm.ipynb @@ -0,0 +1,300 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
datasetfpr_gmm0fpr_gmm1fpr_gmm2auroc_gmm0auroc_gmm1auroc_gmm2aupr_gmm0aupr_gmm1aupr_gmm2
0notmnist10.3010.5211.6796.4896.2096.5198.3898.1798.44
1fashionmnist68.0564.9566.0478.5378.3081.9093.6193.5594.86
2nearood39.1837.7438.8687.5087.2589.2195.9995.8696.65
3texture90.1491.6391.3871.5065.9173.0295.5694.5795.89
4cifar1090.1990.7591.3967.3560.5468.3391.1189.2691.76
5tin92.4492.8493.3864.9658.2066.2790.4788.6391.18
6places36592.8193.1594.2465.3958.5766.4375.0671.6076.87
7farood91.4092.0992.6067.3060.8168.5188.0586.0188.93
\n", + "
" + ], + "text/plain": [ + " dataset fpr_gmm0 fpr_gmm1 fpr_gmm2 auroc_gmm0 auroc_gmm1 \\\n", + "0 notmnist 10.30 10.52 11.67 96.48 96.20 \n", + "1 fashionmnist 68.05 64.95 66.04 78.53 78.30 \n", + "2 nearood 39.18 37.74 38.86 87.50 87.25 \n", + "3 texture 90.14 91.63 91.38 71.50 65.91 \n", + "4 cifar10 90.19 90.75 91.39 67.35 60.54 \n", + "5 tin 92.44 92.84 93.38 64.96 58.20 \n", + "6 places365 92.81 93.15 94.24 65.39 58.57 \n", + "7 farood 91.40 92.09 92.60 67.30 60.81 \n", + "\n", + " auroc_gmm2 aupr_gmm0 aupr_gmm1 aupr_gmm2 \n", + "0 96.51 98.38 98.17 98.44 \n", + "1 81.90 93.61 93.55 94.86 \n", + "2 89.21 95.99 95.86 96.65 \n", + "3 73.02 95.56 94.57 95.89 \n", + "4 68.33 91.11 89.26 91.76 \n", + "5 66.27 90.47 88.63 91.18 \n", + "6 66.43 75.06 71.60 76.87 \n", + "7 68.51 88.05 86.01 88.93 " + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "import pandas as pd\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "# create an empty dataframe\n", + "df = pd.DataFrame()\n", + "\n", + "benchmark = 'fsood'\n", + "method_list = ['gmm0', 'gmm1', 'gmm2']\n", + "\n", + "for method in method_list:\n", + " dirname = f'./mnist_0408_3/mnist_lenet_test_ood_{benchmark}_{method}_0408_3_fl'\n", + " filename = os.path.join(dirname, 'ood.csv')\n", + " sub_df = pd.read_csv(filename)\n", + " sub_df['method'] = method\n", + " df = df.append(sub_df)\n", + "\n", + "data = []\n", + "dataset_list = list(df['dataset'][:8])\n", + "for dataset in dataset_list:\n", + " df_sub_dataset = df[df['dataset'] == dataset]\n", + " metric_list = [dataset]\n", + " metric_list.extend(list(df_sub_dataset['FPR@95']))\n", + " metric_list.extend(list(df_sub_dataset['AUROC']))\n", + " metric_list.extend(list(df_sub_dataset['AUPR_IN']))\n", + " data.append(metric_list)\n", + "\n", + "header = ['dataset'] + \\\n", + "['fpr_'+method for method in method_list] + \\\n", + "['auroc_'+method for method in method_list] + \\\n", + "['aupr_'+method for method in method_list]\n", + "\n", + "df_report = pd.DataFrame(data, columns=header)\n", + "\n", + "df_report" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "notmnist\n", + "& 23.66& 21.00& 32.42& 0.87& 6.16& \\textbf{0.33} \n", + "& 94.82& 95.29& 93.21& 99.84& 98.56& \\textbf{99.89} \n", + "& 90.52& 90.67& 86.82& 99.68& 97.28& \\textbf{99.77} \\\\\n", + "fashionmnist\n", + "& 16.44& 5.96& 8.66& 4.48& 49.54& \\textbf{0.59} \n", + "& 97.30& 98.65& 98.48& 99.15& 89.15& \\textbf{99.68} \n", + "& 97.62& 98.78& 98.50& 99.12& 89.19& \\textbf{99.68} \\\\\n", + "nearood\n", + "& 20.05& 13.48& 20.54& 2.68& 27.85& \\textbf{0.46} \n", + "& 96.06& 96.97& 95.85& 99.49& 93.85& \\textbf{99.78} \n", + "& 94.07& 94.72& 92.66& 99.40& 93.23& \\textbf{99.73} \\\\\n", + "texture\n", + "& 2.43& 0.94& 0.67& 0.23& 90.69& \\textbf{0.02} \n", + "& 99.34& 99.75& 99.81& 99.93& 77.26& \\textbf{99.91} \n", + "& 99.58& 99.84& 99.84& 99.96& 87.56& \\textbf{99.95} \\\\\n", + "cifar10\n", + "& 2.04& 0.72& 1.31& 0.15& 81.71& \\textbf{0.00} \n", + "& 99.31& 99.72& 99.68& 99.90& 79.83& \\textbf{99.90} \n", + "& 99.35& 99.73& 99.67& 99.90& 83.07& \\textbf{99.90} \\\\\n", + "tin\n", + "& 2.08& 0.70& 1.57& 0.48& 82.94& \\textbf{0.00} \n", + "& 99.34& 99.73& 99.67& 99.83& 79.85& \\textbf{99.87} \n", + "& 99.37& 99.73& 99.65& 99.83& 83.36& \\textbf{99.87} \\\\\n", + "places365\n", + "& 3.03& 1.24& 2.74& 0.47& 78.37& 
\\textbf{0.04} \n", + "& 99.18& 99.63& 99.51& 99.84& 81.65& \\textbf{99.86} \n", + "& 97.89& 98.92& 98.39& 99.50& 69.40& \\textbf{99.61} \\\\\n", + "farood\n", + "& 2.40& 0.90& 1.57& 0.33& 83.43& \\textbf{0.01} \n", + "& 99.29& 99.71& 99.67& 99.88& 79.65& \\textbf{99.88} \n", + "& 99.05& 99.55& 99.38& 99.80& 80.84& \\textbf{99.83} \\\\\n" + ] + } + ], + "source": [ + "for line_array in data:\n", + " values = ['{:.2f}'.format(line_array[i]) for i in range(1, len(line_array))]\n", + " print_line = line_array[0] + '\\n'\n", + " for i, element in enumerate(values):\n", + " if i % 6 == 5:\n", + " print_line = print_line + \"& \\\\textbf{\" + element + \"} \\n\"\n", + " if i == 17:\n", + " print_line = print_line[:-2] + \" \\\\\" + \"\\\\\"\n", + " else:\n", + " print_line = print_line + \"& \" + element\n", + " print(print_line)" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "f1e6686d8157a72333d6c7290e12a7b16aa121841fdb309d8a83e0abefbefaae" + }, + "kernelspec": { + "display_name": "Python 3.8.12 64-bit ('ood': conda)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/OpenOOD/tools/report/report_table.ipynb b/OpenOOD/tools/report/report_table.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..d0258f8c3091e1364972d64e0e4c939d0a35fb2c --- /dev/null +++ b/OpenOOD/tools/report/report_table.ipynb @@ -0,0 +1,390 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
datasetfpr_mspfpr_odinfpr_ebofpr_mdsfpr_gmm1fpr_gmm4auroc_mspauroc_odinauroc_eboauroc_mdsauroc_gmm1auroc_gmm4aupr_mspaupr_odinaupr_eboaupr_mdsaupr_gmm1aupr_gmm4
0notmnist43.0937.7044.061.772.640.7888.7789.8588.4499.6799.5099.7975.7277.8375.9799.3699.0999.57
1fashionmnist24.3612.9513.375.7926.200.6194.6296.5596.9598.8895.8699.8093.7295.8696.3498.8596.2899.79
2nearood33.7325.3328.713.7814.420.6991.6993.2092.6999.2897.6899.8084.7286.8486.1599.1097.6899.68
3texture2.541.081.050.2740.090.0099.4499.7099.7299.9095.0299.9499.6499.7799.7699.9497.6399.97
4cifar107.053.063.180.1854.430.0098.6899.3199.3099.8894.1999.9798.7299.2799.1299.8895.8699.97
5tin6.282.933.130.5559.520.0098.7899.3699.3799.7993.7099.9698.7899.3399.2599.7995.5499.96
6places3659.924.594.120.4558.070.0098.1999.0699.1799.8193.8299.9694.8797.0196.8499.4291.3299.88
7farood6.452.922.870.3653.030.0098.7799.3699.3999.8494.1899.9698.0098.8498.7499.7695.0999.94
\n", + "
" + ], + "text/plain": [ + " dataset fpr_msp fpr_odin fpr_ebo fpr_mds fpr_gmm1 fpr_gmm4 \\\n", + "0 notmnist 43.09 37.70 44.06 1.77 2.64 0.78 \n", + "1 fashionmnist 24.36 12.95 13.37 5.79 26.20 0.61 \n", + "2 nearood 33.73 25.33 28.71 3.78 14.42 0.69 \n", + "3 texture 2.54 1.08 1.05 0.27 40.09 0.00 \n", + "4 cifar10 7.05 3.06 3.18 0.18 54.43 0.00 \n", + "5 tin 6.28 2.93 3.13 0.55 59.52 0.00 \n", + "6 places365 9.92 4.59 4.12 0.45 58.07 0.00 \n", + "7 farood 6.45 2.92 2.87 0.36 53.03 0.00 \n", + "\n", + " auroc_msp auroc_odin auroc_ebo auroc_mds auroc_gmm1 auroc_gmm4 \\\n", + "0 88.77 89.85 88.44 99.67 99.50 99.79 \n", + "1 94.62 96.55 96.95 98.88 95.86 99.80 \n", + "2 91.69 93.20 92.69 99.28 97.68 99.80 \n", + "3 99.44 99.70 99.72 99.90 95.02 99.94 \n", + "4 98.68 99.31 99.30 99.88 94.19 99.97 \n", + "5 98.78 99.36 99.37 99.79 93.70 99.96 \n", + "6 98.19 99.06 99.17 99.81 93.82 99.96 \n", + "7 98.77 99.36 99.39 99.84 94.18 99.96 \n", + "\n", + " aupr_msp aupr_odin aupr_ebo aupr_mds aupr_gmm1 aupr_gmm4 \n", + "0 75.72 77.83 75.97 99.36 99.09 99.57 \n", + "1 93.72 95.86 96.34 98.85 96.28 99.79 \n", + "2 84.72 86.84 86.15 99.10 97.68 99.68 \n", + "3 99.64 99.77 99.76 99.94 97.63 99.97 \n", + "4 98.72 99.27 99.12 99.88 95.86 99.97 \n", + "5 98.78 99.33 99.25 99.79 95.54 99.96 \n", + "6 94.87 97.01 96.84 99.42 91.32 99.88 \n", + "7 98.00 98.84 98.74 99.76 95.09 99.94 " + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "import pandas as pd\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "# create an empty dataframe\n", + "df = pd.DataFrame()\n", + "\n", + "benchmark = 'ood'\n", + "method_list = ['msp', 'odin', 'ebo', 'mds', 'gmm1', 'gmm4']\n", + "\n", + "for method in method_list:\n", + " dirname = f'./mnist_0408_3/mnist_lenet_test_ood_{benchmark}_{method}_0408_3'\n", + " filename = os.path.join(dirname, 'ood.csv')\n", + " sub_df = pd.read_csv(filename)\n", + " sub_df['method'] = method\n", + " df = df.append(sub_df)\n", + "\n", + "data = []\n", + "dataset_list = list(df['dataset'][:8])\n", + "for dataset in dataset_list:\n", + " df_sub_dataset = df[df['dataset'] == dataset]\n", + " metric_list = [dataset]\n", + " metric_list.extend(list(df_sub_dataset['FPR@95']))\n", + " metric_list.extend(list(df_sub_dataset['AUROC']))\n", + " metric_list.extend(list(df_sub_dataset['AUPR_IN']))\n", + " data.append(metric_list)\n", + "\n", + "header = ['dataset'] + \\\n", + "['fpr_'+method for method in method_list] + \\\n", + "['auroc_'+method for method in method_list] + \\\n", + "['aupr_'+method for method in method_list]\n", + "df_report = pd.DataFrame(data, columns=header)\n", + "\n", + "df_report" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "notmnist\n", + "& 43.09& 37.70& 44.06& 1.77& 2.64& \\textbf{0.78} \n", + "& 88.77& 89.85& 88.44& 99.67& 99.50& \\textbf{99.79} \n", + "& 75.72& 77.83& 75.97& 99.36& 99.09& \\textbf{99.57} \\\\\n", + "fashionmnist\n", + "& 24.36& 12.95& 13.37& 5.79& 26.20& \\textbf{0.61} \n", + "& 94.62& 96.55& 96.95& 98.88& 95.86& \\textbf{99.80} \n", + "& 93.72& 95.86& 96.34& 98.85& 96.28& \\textbf{99.79} \\\\\n", + "nearood\n", + "& 33.73& 25.33& 28.71& 3.78& 14.42& \\textbf{0.69} \n", + "& 91.69& 93.20& 92.69& 99.28& 97.68& \\textbf{99.80} \n", + "& 84.72& 86.84& 86.15& 99.10& 97.68& \\textbf{99.68} \\\\\n", + "texture\n", + "& 2.54& 1.08& 1.05& 0.27& 40.09& 
\\textbf{0.00} \n", + "& 99.44& 99.70& 99.72& 99.90& 95.02& \\textbf{99.94} \n", + "& 99.64& 99.77& 99.76& 99.94& 97.63& \\textbf{99.97} \\\\\n", + "cifar10\n", + "& 7.05& 3.06& 3.18& 0.18& 54.43& \\textbf{0.00} \n", + "& 98.68& 99.31& 99.30& 99.88& 94.19& \\textbf{99.97} \n", + "& 98.72& 99.27& 99.12& 99.88& 95.86& \\textbf{99.97} \\\\\n", + "tin\n", + "& 6.28& 2.93& 3.13& 0.55& 59.52& \\textbf{0.00} \n", + "& 98.78& 99.36& 99.37& 99.79& 93.70& \\textbf{99.96} \n", + "& 98.78& 99.33& 99.25& 99.79& 95.54& \\textbf{99.96} \\\\\n", + "places365\n", + "& 9.92& 4.59& 4.12& 0.45& 58.07& \\textbf{0.00} \n", + "& 98.19& 99.06& 99.17& 99.81& 93.82& \\textbf{99.96} \n", + "& 94.87& 97.01& 96.84& 99.42& 91.32& \\textbf{99.88} \\\\\n", + "farood\n", + "& 6.45& 2.92& 2.87& 0.36& 53.03& \\textbf{0.00} \n", + "& 98.77& 99.36& 99.39& 99.84& 94.18& \\textbf{99.96} \n", + "& 98.00& 98.84& 98.74& 99.76& 95.09& \\textbf{99.94} \\\\\n" + ] + } + ], + "source": [ + "for line_array in data:\n", + " values = ['{:.2f}'.format(line_array[i]) for i in range(1, len(line_array))]\n", + " print_line = line_array[0] + '\\n'\n", + " for i, element in enumerate(values):\n", + " if i % 6 == 5:\n", + " print_line = print_line + \"& \\\\textbf{\" + element + \"} \\n\"\n", + " if i == 17:\n", + " print_line = print_line[:-2] + \" \\\\\" + \"\\\\\"\n", + " else:\n", + " print_line = print_line + \"& \" + element\n", + " print(print_line)" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "f1e6686d8157a72333d6c7290e12a7b16aa121841fdb309d8a83e0abefbefaae" + }, + "kernelspec": { + "display_name": "Python 3.8.12 64-bit ('ood': conda)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/OpenOOD/tools/sweep/hyperparam.py b/OpenOOD/tools/sweep/hyperparam.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391