Upload folder using huggingface_hub
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
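For reference, a commit with this message is typically produced with the `upload_folder` helper from `huggingface_hub`. The sketch below shows a minimal invocation; the actual target repository and token handling for this upload are not recorded on this page, and the `repo_id` is a placeholder.

```python
# Minimal sketch of pushing a local folder to the Hub with huggingface_hub.
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_folder(
    folder_path="Abnormal-CT-Generation-Healthy",  # local folder to push
    repo_id="<user>/<repo>",                       # hypothetical destination repository
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```

Files matching the repository's `.gitattributes` LFS rules (such as the checkpoint and PNG files listed above) are stored on the Hub through Git LFS.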
- .gitattributes +65 -0
- Abnormal-CT-Generation-Healthy/LICENSE +674 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/__init__.py +4 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/__init__.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/__init__.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/__init__.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/data.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/data.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/ckpts/LeanVAE-dim16.ckpt +3 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/data.py +466 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder_pl.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder_pl.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder_pl.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/models/autoencoder.py +186 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/models/autoencoder_pl.py +232 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__init__.py +3 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/__init__.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/__init__.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/backbones.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/backbones.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/backbones.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/discriminator.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/discriminator.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/discriminator.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/lpips.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/lpips.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/lpips.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/vae.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/vae.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/vae.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/backbones.py +402 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/cache/vgg.pth +3 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/discriminator.py +130 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/lpips.py +230 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/modules/vae.py +73 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__init__.py +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/__init__.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/gan_loss.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/gan_loss.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/gan_loss.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/patcher_utils.cpython-310.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/patcher_utils.cpython-311.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/patcher_utils.cpython-39.pyc +0 -0
- Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/video_utils.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -7902,3 +7902,68 @@ UMM/unsloth/images/unsloth[[:space:]]loading[[:space:]]page[[:space:]]render.png
 UMM/unsloth/images/unsloth[[:space:]]logo[[:space:]]black[[:space:]]text.png filter=lfs diff=lfs merge=lfs -text
 UMM/unsloth/images/unsloth[[:space:]]logo[[:space:]]white[[:space:]]text.png filter=lfs diff=lfs merge=lfs -text
 UMM/unsloth/images/unsloth[[:space:]]sticker.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_0.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_1.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_10.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_11.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_12.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_13.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_14.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_15.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_16.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_17.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_18.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_19.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_2.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_20.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_21.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_22.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_23.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_24.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_25.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_26.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_27.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_28.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_29.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_3.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_30.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_31.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_32.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_33.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_34.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_35.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_36.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_37.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_38.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_39.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_4.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_40.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_41.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_42.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_43.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_44.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_45.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_46.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_47.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_48.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_49.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_5.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_50.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_51.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_52.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_53.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_54.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_55.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_56.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_57.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_58.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_59.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_6.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_60.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_61.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_62.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_63.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_64.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_7.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_8.png filter=lfs diff=lfs merge=lfs -text
+Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_9.png filter=lfs diff=lfs merge=lfs -text
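Each added rule follows the standard `.gitattributes` format: a path pattern followed by attribute assignments, where `filter=lfs diff=lfs merge=lfs -text` marks matching files as binary content stored through Git LFS. As a rough, hypothetical illustration (not code from this repository, and ignoring the gitignore-style matching subtleties of real Git), a few lines of Python can check whether a path is covered by such a rule:

```python
from fnmatch import fnmatch

def is_lfs_tracked(path: str, gitattributes_lines: list[str]) -> bool:
    """Return True if any rule marks `path` as stored through Git LFS."""
    for line in gitattributes_lines:
        parts = line.split()
        if not parts or parts[0].startswith("#"):
            continue  # skip blank lines and comments
        pattern, attrs = parts[0], parts[1:]
        if fnmatch(path, pattern) and "filter=lfs" in attrs:
            return True
    return False

# Example rules: one wildcard rule (hypothetical) and one literal rule from the diff above.
rules = [
    "*.ckpt filter=lfs diff=lfs merge=lfs -text",
    "Abnormal-CT-Generation-Healthy/logs/full_ct_2d_with_body_mask/inference/valid_5_a_2_sample_0.png filter=lfs diff=lfs merge=lfs -text",
]
print(is_lfs_tracked("Abnormal-CT-Generation-Healthy/LeanVAE/ckpts/LeanVAE-dim16.ckpt", rules))  # True
```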
Abnormal-CT-Generation-Healthy/LICENSE
ADDED
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.

(The remaining added lines are the unmodified text of the GNU General Public License, version 3: the Preamble, the Terms and Conditions, sections 0 through 17, and the "How to Apply These Terms to Your New Programs" appendix. The diff view truncates the 674-line file at line 661.)
|
| 662 |
+
might be different; for a GUI interface, you would use an "about box".
|
| 663 |
+
|
| 664 |
+
You should also get your employer (if you work as a programmer) or school,
|
| 665 |
+
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
| 666 |
+
For more information on this, and how to apply and follow the GNU GPL, see
|
| 667 |
+
<https://www.gnu.org/licenses/>.
|
| 668 |
+
|
| 669 |
+
The GNU General Public License does not permit incorporating your program
|
| 670 |
+
into proprietary programs. If your program is a subroutine library, you
|
| 671 |
+
may consider it more useful to permit linking proprietary applications with
|
| 672 |
+
the library. If this is what you want to do, use the GNU Lesser General
|
| 673 |
+
Public License instead of this License. But first, please read
|
| 674 |
+
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
Abnormal-CT-Generation-Healthy/LeanVAE/__init__.py
ADDED
@@ -0,0 +1,4 @@

from .models.autoencoder import LeanVAE
from .models.autoencoder_pl import AutoEncoderEngine
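Both entry points are re-exported at the package root, so downstream code can import them in one line. A minimal sketch (it assumes the `LeanVAE` package directory from this upload is on the Python path, which the diff itself does not guarantee):

# Assumes the repository root containing the LeanVAE/ package is on PYTHONPATH.
from LeanVAE import LeanVAE, AutoEncoderEngine

print(LeanVAE)            # the plain nn.Module autoencoder
print(AutoEncoderEngine)  # the LightningModule training wrapper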
Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 309 Bytes)
Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/__init__.cpython-311.pyc ADDED (binary file, 333 Bytes)
Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/__init__.cpython-39.pyc ADDED (binary file, 307 Bytes)
Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/data.cpython-310.pyc ADDED (binary file, 16.8 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/__pycache__/data.cpython-39.pyc ADDED (binary file, 17.1 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/ckpts/LeanVAE-dim16.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1c1d65765e44ced040a43a0bb7084a936e5b3862d21df4a9fd13580508cd1ecb
size 159199850
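The checkpoint is stored through Git LFS, so the three lines above are only the pointer (spec version, object hash, and a size of roughly 159 MB); the weights themselves live in LFS storage. A minimal sketch of resolving the file with `huggingface_hub`; the `repo_id` is a placeholder for whatever Hub repository this folder was uploaded to, which the diff does not name:

from huggingface_hub import hf_hub_download

# repo_id below is hypothetical; substitute the actual Hub repository id.
ckpt_path = hf_hub_download(
    repo_id="<namespace>/<repo-name>",
    filename="Abnormal-CT-Generation-Healthy/LeanVAE/ckpts/LeanVAE-dim16.ckpt",
)
print(ckpt_path)  # local cache path of the downloaded ~159 MB checkpoint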
Abnormal-CT-Generation-Healthy/LeanVAE/data.py
ADDED
@@ -0,0 +1,466 @@
import os
import os.path as osp
import math
import random
import argparse
import numpy as np
from PIL import Image
from torch.utils.data import BatchSampler, Dataset, Sampler
import torch
import torch.utils.data as data
import torch.nn.functional as F
import torch.distributed as dist
from torchvision.datasets.video_utils import VideoClips
import pytorch_lightning as pl
from typing import TypeVar, Optional, Iterator, List
from collections import Counter, defaultdict
from decord import VideoReader
from .utils.video_utils import VideoNorm

# Newer torchvision removed the PIL integer interpolation constants that timm expects,
# so patch timm's _pil_interp when InterpolationMode is available.
try:
    from torchvision.transforms import InterpolationMode

    def _pil_interp(method):
        if method == 'bicubic':
            return InterpolationMode.BICUBIC
        elif method == 'lanczos':
            return InterpolationMode.LANCZOS
        elif method == 'hamming':
            return InterpolationMode.HAMMING
        else:
            # default bilinear, do we want to allow nearest?
            return InterpolationMode.BILINEAR

    import timm.data.transforms as timm_transforms

    timm_transforms._pil_interp = _pil_interp
except:
    from timm.data.transforms import _pil_interp


class MultiSizeVideoDataset(data.Dataset):
    """A flexible dataset for loading videos of different resolutions stored in a structured format.
    This dataset reads video file paths from text files, where each file corresponds to a specific
    resolution (e.g., `256x256`). Returns BCTHW videos in the range [-0.5, 0.5]."""

    def __init__(self, data_list, data_folder=None, sequence_length=17, train=True, sample_rate=1, dynamic_sample=False):
        """
        Args:
            data_list (str): Path to the folder containing text files with video paths.
            data_folder (Optional[str]): Root folder where videos are stored (if paths in data_list are relative).
            sequence_length (int): Length of extracted video sequences.
        """
        super().__init__()
        self.train = train
        self.data_folder = data_folder
        self.sequence_length = sequence_length
        self.dynamic_sample = dynamic_sample
        self.sample_rate = sample_rate

        lengths = []
        annotations = []
        for dir in os.listdir(data_list):
            file_path = os.path.join(data_list, dir)
            with open(file_path) as f:
                annotation = [ann.strip() for ann in f.readlines()]
            annotations.extend(annotation)
            # each video inherits the resolution tag of the list file it came from
            lengths.extend([dir] * len(annotation))

        self.annotations = annotations
        self.lengths = lengths

        self.norm = VideoNorm()

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, idx):
        video_path = self.annotations[idx] if self.data_folder is None else os.path.join(self.data_folder, self.annotations[idx])
        try:
            decord_vr = VideoReader(video_path)
            total_frames = len(decord_vr)
        except Exception as e:
            raise RuntimeError(f"Failed to read video: {video_path}. Error: {e}")

        if self.dynamic_sample:
            sample_rate = random.randint(1, self.sample_rate)
        else:
            sample_rate = self.sample_rate

        required_frames = self.sequence_length * sample_rate
        if total_frames < self.sequence_length:
            raise RuntimeError(f"Video {video_path} has only {total_frames} frames, but {self.sequence_length} are required.")

        if total_frames < required_frames:
            sample_rate = 1
            required_frames = self.sequence_length

        start_frame_ind = random.randint(0, max(0, total_frames - required_frames))
        end_frame_ind = min(start_frame_ind + required_frames, total_frames)
        frame_indice = np.linspace(
            start_frame_ind, end_frame_ind - 1, self.sequence_length, dtype=int
        )

        video_data = decord_vr.get_batch(frame_indice).asnumpy()
        video_data = torch.from_numpy(video_data).float()
        video_data = video_data.permute(0, 3, 1, 2)  # THWC -> TCHW

        video = self.norm(video_data).permute(1, 0, 2, 3)  # TCHW -> CTHW
        return {"video": video}


class MultiFilesBatchVideoSampler(BatchSampler):
    """A sampler wrapper for grouping videos within the same folder into the same batch.
    Args:
        sampler (Sampler): Base sampler.
        dataset (Dataset): Dataset providing data information.
        batch_size (int): Size of mini-batch.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``.
        aspect_ratios (dict): The predefined aspect ratios.
    """

    def __init__(self,
                 sampler: Sampler,
                 dataset: Dataset,
                 batch_size: int,
                 train_folder: str = None,
                 drop_last: bool = False
                 ) -> None:
        if not isinstance(sampler, Sampler):
            raise TypeError('sampler should be an instance of ``Sampler``, '
                            f'but got {sampler}')
        if not isinstance(batch_size, int) or batch_size <= 0:
            raise ValueError('batch_size should be a positive integer value, '
                             f'but got batch_size={batch_size}')
        self.sampler = sampler
        self.dataset = dataset
        self.train_folder = train_folder
        self.batch_size = batch_size
        self.drop_last = drop_last
        # one bucket of pending indices per resolution folder
        self.bucket = {file_name: [] for file_name in os.listdir(self.train_folder)}

        # {file_name: [list(os.listdir(os.path.join(self.train_folder, file_name)))] for file_name in os.listdir(self.train_folder)}
        self.idx2file = []

    def __iter__(self):
        for idx in self.sampler:
            file_name = self.idx2file[idx]
            bucket = self.bucket[file_name]
            bucket.append(idx)
            # yield a batch of indices in the same aspect ratio group
            if len(bucket) == self.batch_size:
                yield bucket[:]
                del bucket[:]


def group_data_fun(lengths, generator=None):
    # counter is decrease order
    counter = Counter(lengths)  # counter {'1x256x256': 3, ...}, lengths ['1x256x256', '1x256x256', '1x256x256', ...]
    grouped_indices = defaultdict(list)
    for idx, item in enumerate(lengths):  # group idx to a list
        grouped_indices[item].append(idx)

    grouped_indices = dict(grouped_indices)  # {'1x256x256': [0, 1, 2], ...}
    sorted_indices = [grouped_indices[item] for (item, _) in sorted(counter.items(), key=lambda x: x[1], reverse=True)]

    # shuffle in each group
    shuffle_sorted_indices = []
    for indice in sorted_indices:
        shuffle_idx = torch.randperm(len(indice), generator=generator).tolist()
        shuffle_sorted_indices.extend([indice[idx] for idx in shuffle_idx])
    return shuffle_sorted_indices


def last_group_data_fun(shuffled_megabatches, lengths):
    # lengths ['1x256x256', '1x256x256', '1x256x256', ...]
    re_shuffled_megabatches = []
    for i_megabatch, megabatch in enumerate(shuffled_megabatches):
        re_megabatch = []
        for i_batch, batch in enumerate(megabatch):
            assert len(batch) != 0

            len_each_batch = [lengths[i] for i in batch]  # ['1x256x256', '1x256x256']
            idx_length_dict = dict([*zip(batch, len_each_batch)])  # {0: '1x256x256', 100: '1x256x256'}
            count_dict = Counter(len_each_batch)  # {'1x256x256': 2} or {'1x256x256': 1, '1x768x256': 1}
            if len(count_dict) != 1:
                sorted_by_value = sorted(count_dict.items(), key=lambda item: item[1])  # {'1x256x256': 1, '1x768x256': 1}
                pick_length = sorted_by_value[-1][0]  # the highest frequency
                candidate_batch = [idx for idx, length in idx_length_dict.items() if length == pick_length]
                random_select_batch = [random.choice(candidate_batch) for i in range(len(len_each_batch) - len(candidate_batch))]
                batch = candidate_batch + random_select_batch

            for i in range(1, len(batch) - 1):
                assert lengths[batch[0]] == lengths[batch[i]]
            re_megabatch.append(batch)
        re_shuffled_megabatches.append(re_megabatch)

    return re_shuffled_megabatches


def split_to_even_chunks(megabatch, lengths, world_size, batch_size):
    """
    Split a list of indices into `world_size` chunks of roughly equal lengths.
    """
    # batch_size=2, world_size=2
    # [1, 2, 3, 4] -> [[1, 2], [3, 4]]
    # [1, 2, 3]    -> [[1, 2], [3]]
    # [1, 2]       -> [[1], [2]]
    # [1]          -> [[1], []]
    chunks = [megabatch[i::world_size] for i in range(world_size)]

    pad_chunks = []
    for idx, chunk in enumerate(chunks):
        if batch_size != len(chunk):
            assert batch_size > len(chunk)
            if len(chunk) != 0:  # [[1, 2], [3]] -> [[1, 2], [3, 3]]
                chunk = chunk + [random.choice(chunk) for _ in range(batch_size - len(chunk))]
            else:
                chunk = random.choice(pad_chunks)  # [[1], []] -> [[1], [1]]
            print(chunks[idx], '->', chunk)
        pad_chunks.append(chunk)
    return pad_chunks


def get_length_grouped_indices(lengths, batch_size, world_size, gradient_accumulation_size, initial_global_step, generator=None, group_data=False, seed=42):
    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    if generator is None:
        generator = torch.Generator().manual_seed(seed)  # every rank will generate a fixed order but random index

    if group_data:
        indices = group_data_fun(lengths, generator)
    else:
        indices = torch.randperm(len(lengths), generator=generator).tolist()

    megabatch_size = world_size * batch_size
    megabatches = [indices[i: i + megabatch_size] for i in range(0, len(lengths), megabatch_size)]

    megabatches_len = [[lengths[i] for i in megabatch] for megabatch in megabatches]

    megabatches = [split_to_even_chunks(megabatch, lengths, world_size, batch_size) for megabatch in megabatches]

    split_to_even_chunks_len = [[[lengths[i] for i in batch] for batch in megabatch] for megabatch in megabatches]

    indices_mega = torch.randperm(len(megabatches), generator=generator).tolist()
    shuffled_megabatches = [megabatches[i] for i in indices_mega]
    shuffled_megabatches_len = [[[lengths[i] for i in batch] for batch in megabatch] for megabatch in shuffled_megabatches]

    if group_data:
        shuffled_megabatches = last_group_data_fun(shuffled_megabatches, lengths)
        group_shuffled_megabatches_len = [[[lengths[i] for i in batch] for batch in megabatch] for megabatch in shuffled_megabatches]

    # initial_global_step = initial_global_step * gradient_accumulation_size  # todo

    # skip megabatches already consumed before resuming
    shuffled_megabatches = shuffled_megabatches[initial_global_step:]

    return [batch for megabatch in shuffled_megabatches for batch in megabatch]


class LengthGroupedSampler(Sampler):
    r"""
    Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while
    keeping a bit of randomness.
    """

    def __init__(
        self,
        batch_size: int,
        world_size: int,
        gradient_accumulation_size: int = 1,
        initial_global_step: int = 0,
        lengths: Optional[List[int]] = None,
        group_data=False,
        generator=None,
        rank: Optional[int] = None,
        seed: int = 0,
    ):
        if lengths is None:
            raise ValueError("Lengths must be provided.")

        self.batch_size = batch_size
        self.world_size = world_size
        self.initial_global_step = initial_global_step
        self.gradient_accumulation_size = gradient_accumulation_size
        self.lengths = lengths
        self.group_data = group_data
        self.generator = generator

        self.rank = rank
        self.epoch = 0

        self.seed = seed

        megabatch_size = self.batch_size * self.world_size
        self.num_samples = ((len(lengths) + megabatch_size - 1) // megabatch_size) * self.batch_size  # todo
        # self.num_samples = self.num_samples  # - self.initial_global_step * self.batch_size * self.gradient_accumulation_size

    def __len__(self):
        return self.num_samples

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.seed + self.epoch)
        megabatch_indices = get_length_grouped_indices(self.lengths, self.batch_size, self.world_size,
                                                       self.gradient_accumulation_size, self.initial_global_step,
                                                       group_data=self.group_data, generator=g)

        # subsample: each rank takes every world_size-th batch, starting at its own rank
        indices = [i for batch in megabatch_indices[self.rank::self.world_size] for i in batch]
        assert len(indices) == self.num_samples

        return iter(indices)

    def set_epoch(self, epoch: int) -> None:
        r"""
        Set the epoch for this sampler.

        When :attr:`shuffle=True`, this ensures all replicas
        use a different random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.epoch = epoch


class VideoData(pl.LightningDataModule):
    def __init__(self, args):
        super().__init__()
        self.args = args

    def _dataset(self, train):
        datasets = []
        for dataset_path, train_list, val_list in zip(self.args.data_path, self.args.train_datalist, self.args.val_datalist):
            dataset = MultiSizeVideoDataset(data_folder=dataset_path, data_list=train_list if train else val_list,
                                            sequence_length=self.args.sequence_length,
                                            train=train, sample_rate=self.args.sample_rate, dynamic_sample=self.args.dynamic_sample)
            datasets.append(dataset)
        return datasets

    def _dataloader(self, train, steps=0, batch_size=None):
        dataset = self._dataset(train)
        if isinstance(self.args.batch_size, int):
            self.args.batch_size = [self.args.batch_size]
        self.batch_size = self.args.batch_size if batch_size is None else batch_size
        assert len(dataset) == len(self.args.batch_size)
        dataloaders = []
        for dset, d_batch_size in zip(dataset, self.batch_size):
            if dist.is_initialized():
                sampler = LengthGroupedSampler(
                    batch_size=d_batch_size,
                    world_size=dist.get_world_size(),
                    gradient_accumulation_size=1,
                    initial_global_step=steps if train else 0,
                    lengths=dset.lengths,
                    group_data=True,
                    rank=dist.get_rank()
                )
            else:
                sampler = None

            dataloader = data.DataLoader(
                dset,
                batch_size=d_batch_size,
                num_workers=self.args.num_workers if train else 0,
                pin_memory=False,
                sampler=sampler,
            )

            dataloaders.append(dataloader)

        return dataloaders

    def train_dataloader(self):
        return self._dataloader(True)

    def val_dataloader(self):
        return self._dataloader(False)[0]

    @staticmethod
    def add_data_specific_args(parent_parser):
        parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--data_path', type=str, nargs="+", default=[''])
        parser.add_argument('--train_datalist', type=str, nargs="+", default=['./video/kinetics-dataset/train/datapath'])
        parser.add_argument('--val_datalist', type=str, nargs="+", default=['./video/kinetics-dataset/val/datapath'])

        parser.add_argument('--sequence_length', type=int, default=17)
        parser.add_argument('--sample_rate', type=int, default=1,
                            help='Frame sampling rate')
        parser.add_argument('--dynamic_sample', action='store_true',
                            help='Enable dynamic sampling rate')

        parser.add_argument('--batch_size', type=int, nargs="+", default=[5])
        parser.add_argument('--num_workers', type=int, default=8)
        return parser


if __name__ == "__main__":
    # Quick offline check of LengthGroupedSampler: simulate several ranks and dump the batches to a file.
    import os

    def lines(file_path):
        with open(file_path, 'r') as file:
            return sum(1 for line in file)

    train_folder = './kinetics-dataset/datapath'
    lengths_dict = {file_name: lines(os.path.join(train_folder, file_name)) for file_name in os.listdir(train_folder)}
    lengths = []
    for k, v in lengths_dict.items():
        lengths += [k] * min(v, 50)  # (v % 7)
    world_size = 4
    sampler = []
    batch_size = 10
    for rank in range(world_size):
        sampler.append(LengthGroupedSampler(
            batch_size=batch_size,
            world_size=world_size,
            gradient_accumulation_size=1,
            initial_global_step=0,
            lengths=lengths,
            group_data=True,
            rank=rank
        ))

    with open('./sampler.txt', 'w') as f:
        for epoch in range(5):
            rank_idx = {}
            bk = []
            print(f'epoch -------------------------------------- {epoch} ----------------------------------------------------', file=f)
            for rank in range(world_size):
                sl = sampler[rank]
                sl.set_epoch(epoch)
                for i in iter(sl):
                    bk.append(i)
                    if len(bk) == batch_size:
                        rank_idx.setdefault(f'rank_{rank}', [])
                        rank_idx[f'rank_{rank}'].append(bk)
                        bk = []
            for num in range(5):
                print('*' * 5 + f'steps {num}' + '*' * 5, file=f)
                for rank, bk in rank_idx.items():
                    print(f'rank {rank}: {bk[num]}', file=f)
                    print([lengths[i] for i in bk[num]], file=f)

    exit()
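For orientation, a minimal sketch of driving the `VideoData` module above from its own argparse helper. The dataset root and datalist folders are placeholders (each datalist folder is expected to hold one text file of video paths per resolution, mirroring `MultiSizeVideoDataset`), and the import path assumes the package layout shown in this upload:

import argparse
from LeanVAE.data import VideoData  # import path assumed from the package layout above

parser = argparse.ArgumentParser()
parser = VideoData.add_data_specific_args(parser)
args = parser.parse_args([
    "--data_path", "/data/videos",                # placeholder root folder
    "--train_datalist", "/data/datalist/train",   # placeholder folder of per-resolution .txt lists
    "--val_datalist", "/data/datalist/val",
    "--sequence_length", "17",
    "--batch_size", "2",
])

data = VideoData(args)
loader = data._dataloader(train=True)[0]          # one DataLoader per (data_path, datalist) pair
batch = next(iter(loader))
print(batch["video"].shape)                        # (B, C, T, H, W) with T == sequence_length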
Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder.cpython-310.pyc ADDED (binary file, 6.12 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder.cpython-311.pyc ADDED (binary file, 11.7 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder.cpython-39.pyc ADDED (binary file, 6.14 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder_pl.cpython-310.pyc ADDED (binary file, 7.39 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder_pl.cpython-311.pyc ADDED (binary file, 15.4 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/models/__pycache__/autoencoder_pl.cpython-39.pyc ADDED (binary file, 7.35 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/models/autoencoder.py
ADDED
@@ -0,0 +1,186 @@
import argparse
import torch
import torch.distributed
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from ..modules import DiagonalGaussianDistribution, Encoder_Arch, Decoder_Arch, ISTA
from ..utils.patcher_utils import Patcher, UnPatcher


class LeanVAE(nn.Module):
    def __init__(self, args):
        super().__init__()
        self.args = args
        self.embedding_dim = args.embedding_dim

        self.latent_bottleneck = ISTA(points_num=args.embedding_dim, out_num=args.latent_dim, iter_num=args.ista_iter_num, layer_num=args.ista_layer_num)

        self.dwt = Patcher()
        self.idwt = UnPatcher()

        self.encoder = Encoder_Arch(l_dim=args.l_dim, h_dim=args.h_dim, sep_num_layer=args.sep_num_layer, fusion_num_layer=args.fusion_num_layer)
        self.decoder = Decoder_Arch(l_dim=args.l_dim, h_dim=args.h_dim, sep_num_layer=args.sep_num_layer, fusion_num_layer=args.fusion_num_layer)

        self.std_layer = nn.Linear(args.embedding_dim, args.latent_dim)

        self.tile_inference = False
        self.chunksize_enc = args.chunksize_enc if hasattr(args, 'chunksize_enc') and args.chunksize_enc else 5
        self.chunksize_dec = args.chunksize_dec if hasattr(args, 'chunksize_dec') and args.chunksize_dec else 5
        if args.use_tile_inference:
            self.set_tile_inference(True)
        else:
            self.set_tile_inference(False)

    def _set_first_chunk(self, is_first_chunk=True):
        for module in self.modules():
            if hasattr(module, 'is_first_chunk'):
                module.is_first_chunk = is_first_chunk

    def set_tile_inference(self, tile_inference=False):
        for module in self.modules():
            if hasattr(module, 'tile_inference'):
                module.tile_inference = tile_inference

    def _build_chunk_index(self, T=17, mtype='enc'):
        start_end = []
        if mtype == 'enc':
            chunksize = self.chunksize_enc
        else:
            chunksize = self.chunksize_dec
        if T >= chunksize:
            start_end.append((0, chunksize))
            start_idx = chunksize
        else:
            # short clip: the whole sequence fits into a single chunk
            start_end.append((0, T))
            start_idx = T

        for i in range(start_idx, T, chunksize - 1):
            end_idx = min(i + chunksize - 1, T)
            start_end.append((i, end_idx))
        return start_end

    def encode(self, x):
        ndim = x.ndim
        if ndim == 4:
            x = x.unsqueeze(2)
            self.set_tile_inference(False)

        if self.tile_inference:
            z = []
            chunk_indexs = self._build_chunk_index(T=x.shape[2], mtype='enc')
            for idx, (start, end) in enumerate(chunk_indexs):
                if idx == 0:
                    self._set_first_chunk(True)
                else:
                    self._set_first_chunk(False)

                x_dwt = self.dwt(x[:, :, start:end])
                p = self.encoder.encode(x=x_dwt)
                z.append(self.latent_bottleneck.sample(p))
            z = torch.cat(z, dim=1)
        else:
            x_dwt = self.dwt(x)
            p = self.encoder.encode(x=x_dwt)
            z = self.latent_bottleneck.sample(p)

        z = rearrange(z, 'b t h w d -> b d t h w')
        return z

    def decode(self, z, is_image=False):
        z = rearrange(z, 'b d t h w -> b t h w d')
        if is_image:
            self.set_tile_inference(False)
        if self.tile_inference:
            x_recon = []
            chunk_indexs = self._build_chunk_index(T=z.shape[1], mtype='dec')
            for idx, (start, end) in enumerate(chunk_indexs):
                if idx == 0:
                    self._set_first_chunk(True)
                else:
                    self._set_first_chunk(False)
                p_rec = self.latent_bottleneck.recon(z[:, start:end])
                x_dwt_rec = self.decoder.decode(p_rec, is_image=is_image)

                x_recon.append(self.idwt(x=x_dwt_rec))
            x_recon = torch.cat(x_recon, dim=2)
        else:
            p_rec = self.latent_bottleneck.recon(z)
            x_dwt_rec = self.decoder.decode(p_rec, is_image=is_image)

            x_recon = self.idwt(x=x_dwt_rec)

        return x_recon

    @torch.no_grad()
    def inference(self, x):
        if x.ndim == 4:
            is_image = True
        else:
            is_image = False
            assert x.shape[2] % 4 == 1, f"Expected frame_num % 4 == 1, but got {x.shape[2] % 4}"

        z = self.encode(x)
        x_recon = self.decode(z, is_image=is_image)

        if is_image:
            x = x.squeeze(2)
        return x, x_recon

    def forward(self, x, log_image=False):
        x_dwt = self.dwt(x)
        p = self.encoder(x=x_dwt)
        z_mean = self.latent_bottleneck.sample(p)
        z_std = self.std_layer(p)

        posterior = DiagonalGaussianDistribution(parameters=(z_mean, z_std))
        z = posterior.sample()
        p_rec = self.latent_bottleneck.recon(z)

        x_dwt_rec = self.decoder(p_rec)  # b c t h w

        x_recon = self.idwt(x=x_dwt_rec)

        if log_image:
            return x, x_recon

        return x, x_recon, x_dwt, x_dwt_rec, posterior

    @classmethod
    def load_from_checkpoint(cls, ckpt_path, device="cpu", strict=False):
        """Load model from checkpoint, initializing args and state_dict."""
        checkpoint = torch.load(ckpt_path, map_location=device)

        if "args" not in checkpoint:
            raise ValueError("Checkpoint does not contain 'args'. Ensure the checkpoint is saved correctly.")

        args = argparse.Namespace(**checkpoint["args"])

        model = cls(args)
        if "state_dict" in checkpoint:
            msg = model.load_state_dict(checkpoint["state_dict"], strict=strict)
            print(f"Successfully loaded weights from {ckpt_path}, {msg}")
        return model

    @staticmethod
    def add_model_specific_args(parent_parser):
        parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)

        # Model architecture parameters
        parser.add_argument("--embedding_dim", type=int, default=512, help="Dimension of the embedding space.")
        parser.add_argument("--latent_dim", type=int, default=4, help="Dimension of the latent channel.")
        parser.add_argument("--ista_iter_num", type=int, default=2, help="Number of iterations in ISTA latent bottleneck.")
        parser.add_argument("--ista_layer_num", type=int, default=2, help="Number of layers in ISTA latent bottleneck.")

        parser.add_argument("--l_dim", type=int, default=128)
        parser.add_argument("--h_dim", type=int, default=384)
        parser.add_argument("--sep_num_layer", type=int, default=2, help="Number of separate processing layers in encoder/decoder.")
        parser.add_argument("--fusion_num_layer", type=int, default=4, help="Number of fusion layers in encoder/decoder.")

        # Tiling inference (for memory-efficient processing)
        parser.add_argument("--use_tile_inference", action="store_true", help="Enable tiling inference to process video in chunks.")
        parser.add_argument("--chunksize_enc", type=int, default=9, help="Number of frames per chunk during tiled encoding.")
        parser.add_argument("--chunksize_dec", type=int, default=5, help="Number of frames per chunk during tiled decoding.")
        return parser
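A minimal sketch of restoring the model defined above and running a round trip. The checkpoint path points at the LFS file added earlier in this diff (assuming it stores the 'args' and 'state_dict' entries that `load_from_checkpoint` expects), and the 17-frame dummy clip satisfies the `frame_num % 4 == 1` check in `inference`:

import torch
from LeanVAE import LeanVAE  # import path assumed from the package __init__ above

ckpt = "Abnormal-CT-Generation-Healthy/LeanVAE/ckpts/LeanVAE-dim16.ckpt"
model = LeanVAE.load_from_checkpoint(ckpt, device="cpu").eval()

# Dummy clip in BCTHW layout; 17 frames, so 17 % 4 == 1 as required by inference().
video = torch.randn(1, 3, 17, 256, 256)
x, x_recon = model.inference(video)
z = model.encode(video)
print(z.shape, x_recon.shape)  # latent is (B, latent_dim, t, h, w); reconstruction matches the input shape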
Abnormal-CT-Generation-Healthy/LeanVAE/models/autoencoder_pl.py
ADDED
@@ -0,0 +1,232 @@
import argparse
import numpy as np
from PIL import Image
import pytorch_lightning as pl
import torch
import torch.distributed
import torch.nn as nn
import torch.nn.functional as F
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.models.layers import trunc_normal_
from .autoencoder import LeanVAE
from ..modules import LPIPS
from ..utils.gan_loss import AdversarialLoss


class AutoEncoderEngine(pl.LightningModule):
    def __init__(self, args, data):
        super().__init__()
        self.args = args
        self.video_data = data

        self.autoencoder = LeanVAE(args=args)

        # two optimizers (generator / discriminator) are stepped by hand
        self.automatic_optimization = False
        self.kl_weight = args.kl_weight
        self.discriminator_iter_start = args.discriminator_iter_start

        self.perceptual_weight = args.perceptual_weight
        self.l1_weight = args.l1_weight

        self.grad_clip_val = args.grad_clip_val

        if not hasattr(args, "grad_clip_val_disc"):
            args.grad_clip_val_disc = 1.0

        self.grad_clip_val_disc = args.grad_clip_val_disc

        self.apply(self._init_weights)
        self.perceptual_model = LPIPS().eval()
        self.perceptual_model.requires_grad_(False)
        self.gan_loss = AdversarialLoss(disc_weight=args.disc_weight)
        self.save_hyperparameters()

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
            if m.weight is not None:
                nn.init.constant_(m.weight, 1.0)

        elif isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight)
            nn.init.zeros_(m.bias)

    def forward(self, x, optimizer_idx=None, x_recon=None, log_image=False):
        if log_image:
            return self.autoencoder(x, log_image)

        if optimizer_idx == 1:
            # discriminator step
            discloss = self.gan_loss(inputs=x, reconstructions=x_recon, optimizer_idx=1)
            self.log("train/discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            return discloss

        elif optimizer_idx == 0 or optimizer_idx is None:
            # generator step; the same path is reused for metric-only evaluation
            # (optimizer_idx is None in validation_step), in which case only the
            # individual losses are returned and no train/* metrics are logged.
            assert x.ndim == 5
            B, C, T, H, W = x.shape
            x, x_recon, x_dwt, x_dwt_rec, posterior = self.autoencoder(x)
            recon_loss = F.l1_loss(x_recon, x) * self.l1_weight
            kl_loss = posterior.kl()
            kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] * self.kl_weight

            g_loss = 0.0
            if optimizer_idx == 0 and self.global_step >= self.discriminator_iter_start:
                g_loss = self.gan_loss(x, x_recon, optimizer_idx=0)
                self.log("train/g_loss", g_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)

            # wavelet-domain reconstruction losses on the low- and high-frequency sub-bands
            recon_loss_low = (F.l1_loss(x_dwt_rec[0][:, :3], x_dwt[0][:, :3]) + F.l1_loss(x_dwt_rec[1][:, :3], x_dwt[1][:, :3])) * self.l1_weight * 0.05
            recon_loss_high = (F.l1_loss(x_dwt_rec[0][:, 3:], x_dwt[0][:, 3:]) + F.l1_loss(x_dwt_rec[1][:, 3:], x_dwt[1][:, 3:])) * self.l1_weight * 0.1

            # perceptual loss on a random window of k consecutive frames plus the first frame
            k = 4
            valid_start_indices = torch.tensor([x for x in range(T - k + 1) if x % 4 == 1])
            start_idx = valid_start_indices[torch.randint(0, len(valid_start_indices), (B,))]
            frame_idx = start_idx.unsqueeze(1) + torch.arange(k)
            frame_idx = torch.cat((torch.zeros((B, 1), dtype=torch.int), frame_idx), dim=1).to(self.device)

            frame_idx_selected = frame_idx.reshape(-1, 1, k + 1, 1, 1).repeat(1, C, 1, H, W)
            frames = torch.gather(x, 2, frame_idx_selected)
            frames_recon = torch.gather(x_recon, 2, frame_idx_selected)
            frames = frames.permute(0, 2, 1, 3, 4).contiguous().view(-1, 3, H, W)
            frames_recon = frames_recon.permute(0, 2, 1, 3, 4).contiguous().view(-1, 3, H, W)
            perceptual_loss = self.perceptual_model(frames, frames_recon).mean() * self.perceptual_weight

            if optimizer_idx == 0:
                self.log("train/recon_loss", recon_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
                self.log("train/kl_loss", kl_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
                self.log("train/recon_loss_low", recon_loss_low, prog_bar=True, logger=True, on_step=True, on_epoch=True)
                self.log("train/recon_loss_high", recon_loss_high, prog_bar=True, logger=True, on_step=True, on_epoch=True)
                self.log("train/perceptual_loss", perceptual_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
                return perceptual_loss + recon_loss + recon_loss_low + recon_loss_high + kl_loss + g_loss, x_recon

            return perceptual_loss, recon_loss, kl_loss

    def training_step(self, batch, batch_idx):
        x = batch[0]['video']

        sch1, sch2 = self.lr_schedulers()
        opt1, opt2 = self.optimizers()

        cur_global_step = self.global_step

        # generator / autoencoder update
        self.toggle_optimizer(opt1, optimizer_idx=0)
        loss_generator, x_recon = self.forward(x, optimizer_idx=0)
        opt1.zero_grad()
        self.manual_backward(loss_generator)
        if self.grad_clip_val is not None:
            self.clip_gradients(opt1, gradient_clip_val=self.grad_clip_val)
        opt1.step()
        sch1.step(cur_global_step)
        self.untoggle_optimizer(optimizer_idx=0)

        # discriminator update, enabled after the warm-up period
        if cur_global_step > self.discriminator_iter_start:
            self.toggle_optimizer(opt2, optimizer_idx=1)
            loss_discriminator = self.forward(x, optimizer_idx=1, x_recon=x_recon)

            opt2.zero_grad()
            self.manual_backward(loss_discriminator)

            if self.grad_clip_val_disc is not None:
                self.clip_gradients(opt2, gradient_clip_val=self.grad_clip_val_disc)
            opt2.step()
            sch2.step(cur_global_step)
            self.untoggle_optimizer(optimizer_idx=1)

    def validation_step(self, batch, batch_idx):
        x = batch['video']
        perceptual_loss, recon_loss, kl_loss = self.forward(x)
        self.log('val/recon_loss', recon_loss, prog_bar=True)
        self.log('val/perceptual_loss', perceptual_loss, prog_bar=True)
        self.log("val/kl_loss", kl_loss, prog_bar=True)

    def train_dataloader(self):
        dataloaders = self.video_data._dataloader(train=True)
        return dataloaders

    def val_dataloader(self):
        return self.video_data._dataloader(False)[0]

    def configure_optimizers(self):
        opt_ae = torch.optim.Adam(self.autoencoder.parameters(),
                                  lr=self.args.lr, betas=(0.5, 0.9))

        opt_disc = torch.optim.Adam(
            self.gan_loss.get_trainable_parameters(),
            lr=self.args.lr_min, betas=(0.5, 0.9))

        lr_min = self.args.lr_min
        train_iters = self.args.max_steps - self.discriminator_iter_start
        warmup_steps = self.args.warmup_steps
        warmup_lr_init = self.args.warmup_lr_init

        sch_ae = CosineLRScheduler(
            opt_ae,
            lr_min=lr_min,
            t_initial=train_iters,
            warmup_lr_init=warmup_lr_init,
            warmup_t=warmup_steps,
            cycle_mul=1.,
            cycle_limit=1,
            t_in_epochs=True,
        )

        sch_disc = CosineLRScheduler(
            opt_disc,
            lr_min=lr_min,
            t_initial=train_iters,
            warmup_lr_init=warmup_lr_init,
            warmup_t=self.args.dis_warmup_steps,
            cycle_mul=1.,
            cycle_limit=1,
            t_in_epochs=True,
        )

        return [opt_ae, opt_disc], [{"scheduler": sch_ae, "interval": "step"}, {"scheduler": sch_disc, "interval": "step"}]

    def log_videos(self, batch, **kwargs):
        log = dict()
        if isinstance(batch, list):
            batch = batch[0]
        x = batch['video']
        x, x_rec = self(x, log_image=True)
        log["inputs"] = x
        log["reconstructions"] = x_rec
        return log

    @staticmethod
    def add_model_specific_args(parent_parser):
        parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)

        # training configurations
        parser.add_argument('--lr', type=float, default=5e-5)
        parser.add_argument('--lr_min', type=float, default=1e-5)
        parser.add_argument('--warmup_steps', type=int, default=5000)
        parser.add_argument('--warmup_lr_init', type=float, default=0.)
        parser.add_argument('--grad_clip_val', type=float, default=1.0)
        parser.add_argument('--grad_clip_val_disc', type=float, default=1.0)

        parser.add_argument('--kl_weight', type=float, default=1e-7)
        parser.add_argument('--perceptual_weight', type=float, default=4.)
        parser.add_argument('--l1_weight', type=float, default=4.)
        parser.add_argument('--disc_weight', type=float, default=0.2)

        # configuration for discriminator
        parser.add_argument('--dis_warmup_steps', type=int, default=0)
        parser.add_argument('--discriminator_iter_start', type=int, default=0)
        parser.add_argument('--dis_lr_multiplier', type=float, default=1.)

        return parser
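A minimal sketch of how the three argparse helpers in this upload might be combined to launch training with Lightning. `--max_steps` is added by hand because `configure_optimizers` reads `args.max_steps` but no helper above defines it; the trainer settings and all paths are placeholders:

import argparse
import pytorch_lightning as pl
from LeanVAE import LeanVAE, AutoEncoderEngine
from LeanVAE.data import VideoData

parser = argparse.ArgumentParser()
parser.add_argument("--max_steps", type=int, default=100000)  # consumed by configure_optimizers
parser = VideoData.add_data_specific_args(parser)
parser = LeanVAE.add_model_specific_args(parser)
parser = AutoEncoderEngine.add_model_specific_args(parser)
args = parser.parse_args()

data = VideoData(args)
engine = AutoEncoderEngine(args, data)

trainer = pl.Trainer(max_steps=args.max_steps, accelerator="auto", devices=1)
trainer.fit(engine)  # the engine supplies its own train/val dataloaders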
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__init__.py
ADDED
@@ -0,0 +1,3 @@
from .lpips import LPIPS
from .backbones import *
from .vae import DiagonalGaussianDistribution
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 321 Bytes)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/__init__.cpython-311.pyc ADDED (binary file, 355 Bytes)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/__init__.cpython-39.pyc ADDED (binary file, 319 Bytes)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/backbones.cpython-310.pyc ADDED (binary file, 11.6 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/backbones.cpython-311.pyc ADDED (binary file, 24.6 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/backbones.cpython-39.pyc ADDED (binary file, 12.1 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/discriminator.cpython-310.pyc ADDED (binary file, 4.08 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/discriminator.cpython-311.pyc ADDED (binary file, 8.23 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/discriminator.cpython-39.pyc ADDED (binary file, 4.07 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/lpips.cpython-310.pyc ADDED (binary file, 9.14 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/lpips.cpython-311.pyc ADDED (binary file, 19.2 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/lpips.cpython-39.pyc ADDED (binary file, 9.07 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/vae.cpython-310.pyc ADDED (binary file, 2.84 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/vae.cpython-311.pyc ADDED (binary file, 4.92 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/__pycache__/vae.cpython-39.pyc ADDED (binary file, 2.82 kB)
Abnormal-CT-Generation-Healthy/LeanVAE/modules/backbones.py
ADDED
@@ -0,0 +1,402 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from beartype import beartype
from typing import Tuple
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import numpy as np


def exists(val):
    return val is not None


def default(val, d):
    if exists(val):
        return val
    return d() if callable(d) else d


class PEG3D(nn.Module):
    def __init__(
        self,
        dim
    ):
        super().__init__()
        # depthwise 3D convolution acting as a positional encoding generator
        self.ds_conv = nn.Conv3d(in_channels=dim, out_channels=dim, kernel_size=(3, 3, 3), groups=dim)
        self.is_first_chunk = True
        self.causal_cached = None
        self.tile_inference = False

    def forward(self, x):
        x = rearrange(x, 'b t h w d -> b d t h w')
        if self.tile_inference:
            if self.is_first_chunk:
                # causal temporal padding for the very first chunk
                x = F.pad(x, (1, 1, 1, 1, 2, 0), value=0.)
            else:
                # later chunks are prepended with the cached tail of the previous chunk
                x = F.pad(x, (1, 1, 1, 1, 0, 0), value=0.)
                x = torch.concatenate((self.causal_cached, x), dim=2)

            self.causal_cached = x[:, :, -2:].clone()
        else:
            x = F.pad(x, (1, 1, 1, 1, 2, 0), value=0.)
        x = self.ds_conv(x.contiguous())
        x = rearrange(x, 'b d t h w -> b t h w d')
        return x


class GEGLU(nn.Module):
    def forward(self, x):
        x, gate = x.chunk(2, dim=-1)
        return F.gelu(gate) * x


def ffd(dim, mult=4, dropout=0.):
    inner_dim = int(mult * (2 / 3) * dim)
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, inner_dim * 2, bias=False),
        GEGLU(),
        nn.Dropout(dropout),
        nn.Linear(inner_dim, dim, bias=False)
    )


class NAF(nn.Module):
    def __init__(self,
                 num_layer,
                 dim,
                 ):
        super(NAF, self).__init__()
        self.num_layer = num_layer
        self.dconv_layer = nn.Sequential()
        self.ffd_layer = nn.Sequential()
        for _ in range(num_layer):
            self.ffd_layer.append(ffd(dim, 4))
            self.dconv_layer.append(PEG3D(dim))

    def forward(self, x):
        for i in range(self.num_layer):
            x = self.dconv_layer[i](x)
            x = self.ffd_layer[i](x)
        return x


class ResNAF(nn.Module):
    def __init__(self,
                 num_layer,
                 dim,
                 ):
        super(ResNAF, self).__init__()
        self.num_layer = num_layer
        self.dconv_layer = nn.Sequential()
        self.ffd_layer = nn.Sequential()
        for _ in range(num_layer):
            self.ffd_layer.append(ffd(dim, 4))
            self.dconv_layer.append(PEG3D(dim))

    def forward(self, x):
        # same stack as NAF but with residual connections around each sub-layer
        for i in range(self.num_layer):
            x = x + self.dconv_layer[i](x)
            x = x + self.ffd_layer[i](x)
        return x


class Encoder_Arch(nn.Module):
    def __init__(self,
                 l_dim=128,
                 h_dim=384,
                 sep_num_layer=2,
                 fusion_num_layer=4,
                 patch_size=(2, 4, 4),
                 in_channel=3
                 ):
        super(Encoder_Arch, self).__init__()
|
| 113 |
+
|
| 114 |
+
self.is_first_chunk = True
|
| 115 |
+
self.tile_inference = False
|
| 116 |
+
|
| 117 |
+
self.in_channel = in_channel
|
| 118 |
+
|
| 119 |
+
self._build_linear_patch(in_channel=in_channel, out_channel_low=l_dim, out_channel_high=h_dim, pt=patch_size[0], ph=patch_size[1], pw=patch_size[2])
|
| 120 |
+
|
| 121 |
+
self.low_layer = ResNAF(num_layer=sep_num_layer, dim=l_dim)
|
| 122 |
+
self.high_layer = ResNAF(num_layer=sep_num_layer, dim=h_dim)
|
| 123 |
+
self.fusion_layer = ResNAF(num_layer=fusion_num_layer, dim=l_dim + h_dim)
|
| 124 |
+
|
| 125 |
+
def _build_linear_patch(self, in_channel = 3, out_channel_low = 128, out_channel_high = 384, pt = 2, ph = 4, pw = 4):
|
| 126 |
+
patch_config = {
|
| 127 |
+
'video_low': (pt, ph, pw),
|
| 128 |
+
'video_high': (pt, ph, pw),
|
| 129 |
+
'image_low': (1, ph, pw),
|
| 130 |
+
'image_high': (1, ph, pw)
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
for name, (t, h, w) in patch_config.items():
|
| 134 |
+
if 'low' in name:
|
| 135 |
+
in_dim = in_channel * t * h * w
|
| 136 |
+
out_dim = out_channel_low
|
| 137 |
+
else:
|
| 138 |
+
out_dim = out_channel_high
|
| 139 |
+
in_dim = in_channel * t * h * w * 7 if 'video' in name else in_channel * t * h * w * 3
|
| 140 |
+
proj = nn.Sequential(
|
| 141 |
+
Rearrange(f'b c (nt {t}) (nh {h}) (nw {w}) -> b nt nh nw (c {t} {h} {w})' if 'video' in name else f'b c (nh {h}) (nw {w}) -> b 1 nh nw (c {h} {w})'),
|
| 142 |
+
nn.Linear(in_dim, out_dim)
|
| 143 |
+
)
|
| 144 |
+
self.add_module(f"{name}_proj", proj)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def _linear_patch(self, x, proj_type):
|
| 148 |
+
low_comp, high_comp = x[:, :self.in_channel], x[:, self.in_channel:]
|
| 149 |
+
return getattr(self, f"{proj_type}_low_proj")(low_comp), getattr(self, f"{proj_type}_high_proj")(high_comp)
|
| 150 |
+
|
| 151 |
+
def forward(self, x):
|
| 152 |
+
xi, xv = x
|
| 153 |
+
xi_low, xi_high = self._linear_patch(xi, 'image')
|
| 154 |
+
xv_low, xv_high = self._linear_patch(xv, 'video')
|
| 155 |
+
|
| 156 |
+
low_x = torch.cat([xi_low, xv_low], dim=1)
|
| 157 |
+
high_x = torch.cat([xi_high, xv_high], dim=1)
|
| 158 |
+
|
| 159 |
+
high_x = self.high_layer(high_x)
|
| 160 |
+
low_x = self.low_layer(low_x)
|
| 161 |
+
x = torch.cat([low_x, high_x], dim=-1)
|
| 162 |
+
x = self.fusion_layer(x)
|
| 163 |
+
return x
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def encode(self, x):
|
| 168 |
+
xi, xv = x
|
| 169 |
+
if xi is not None and xv is not None:
|
| 170 |
+
xi_low, xi_high = self._linear_patch(xi, 'image')
|
| 171 |
+
xv_low, xv_high = self._linear_patch(xv, 'video')
|
| 172 |
+
|
| 173 |
+
low_x = torch.cat([xi_low, xv_low], dim=1)
|
| 174 |
+
high_x = torch.cat([xi_high, xv_high], dim=1)
|
| 175 |
+
elif xi is not None:
|
| 176 |
+
low_x, high_x = self._linear_patch(xi, 'image')
|
| 177 |
+
elif xv is not None:
|
| 178 |
+
low_x, high_x = self._linear_patch(xv, 'video')
|
| 179 |
+
|
| 180 |
+
high_x = self.high_layer(high_x)
|
| 181 |
+
low_x = self.low_layer(low_x)
|
| 182 |
+
x = torch.cat([low_x, high_x], dim=-1)
|
| 183 |
+
x = self.fusion_layer(x)
|
| 184 |
+
return x
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
class Encoder_Arch(nn.Module):
|
| 189 |
+
def __init__(self,
|
| 190 |
+
l_dim = 128,
|
| 191 |
+
h_dim = 384,
|
| 192 |
+
sep_num_layer = 2,
|
| 193 |
+
fusion_num_layer = 4,
|
| 194 |
+
patch_size = (2,4,4),
|
| 195 |
+
in_channel = 3
|
| 196 |
+
):
|
| 197 |
+
super(Encoder_Arch, self).__init__()
|
| 198 |
+
|
| 199 |
+
self.is_first_chunk = True
|
| 200 |
+
self.tile_inference = False
|
| 201 |
+
|
| 202 |
+
self.in_channel = in_channel
|
| 203 |
+
|
| 204 |
+
self._build_linear_patch(in_channel=in_channel, out_channel_low=l_dim, out_channel_high=h_dim, pt=patch_size[0], ph=patch_size[1], pw=patch_size[2])
|
| 205 |
+
|
| 206 |
+
self.low_layer = ResNAF(num_layer=sep_num_layer, dim=l_dim)
|
| 207 |
+
self.high_layer = ResNAF(num_layer=sep_num_layer, dim=h_dim)
|
| 208 |
+
self.fusion_layer = ResNAF(num_layer=fusion_num_layer, dim=l_dim + h_dim)
|
| 209 |
+
|
| 210 |
+
def _build_linear_patch(self, in_channel = 3, out_channel_low = 128, out_channel_high = 384, pt = 2, ph = 4, pw = 4):
|
| 211 |
+
patch_config = {
|
| 212 |
+
'video_low': (pt, ph, pw),
|
| 213 |
+
'video_high': (pt, ph, pw),
|
| 214 |
+
'image_low': (1, ph, pw),
|
| 215 |
+
'image_high': (1, ph, pw)
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
for name, (t, h, w) in patch_config.items():
|
| 219 |
+
if 'low' in name:
|
| 220 |
+
in_dim = in_channel * t * h * w
|
| 221 |
+
out_dim = out_channel_low
|
| 222 |
+
else:
|
| 223 |
+
out_dim = out_channel_high
|
| 224 |
+
in_dim = in_channel * t * h * w * 7 if 'video' in name else in_channel * t * h * w * 3
|
| 225 |
+
proj = nn.Sequential(
|
| 226 |
+
Rearrange('b c (nt pt) (nh ph) (nw pw) -> b nt nh nw (c pt ph pw)', pt=t, ph=h, pw=w),
|
| 227 |
+
nn.Linear(in_dim, out_dim)
|
| 228 |
+
)
|
| 229 |
+
self.add_module(f"{name}_proj", proj)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def _linear_patch(self, x, proj_type):
|
| 233 |
+
low_comp, high_comp = x[:, :self.in_channel], x[:, self.in_channel:]
|
| 234 |
+
return getattr(self, f"{proj_type}_low_proj")(low_comp), getattr(self, f"{proj_type}_high_proj")(high_comp)
|
| 235 |
+
|
| 236 |
+
def _feature_transform(self, low_x, high_x):
|
| 237 |
+
low_x = self.low_layer(low_x)
|
| 238 |
+
high_x = self.high_layer(high_x)
|
| 239 |
+
x = torch.cat([low_x, high_x], dim=-1)
|
| 240 |
+
x = self.fusion_layer(x)
|
| 241 |
+
return x
|
| 242 |
+
|
| 243 |
+
def forward(self, x):
|
| 244 |
+
xi, xv = x
|
| 245 |
+
xi_low, xi_high = self._linear_patch(x=xi, proj_type='image')
|
| 246 |
+
xv_low, xv_high = self._linear_patch(x=xv, proj_type='video')
|
| 247 |
+
|
| 248 |
+
low_x = torch.cat([xi_low, xv_low], dim=1)
|
| 249 |
+
high_x = torch.cat([xi_high, xv_high], dim=1)
|
| 250 |
+
|
| 251 |
+
return self._feature_transform(low_x=low_x, high_x=high_x)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def encode(self, x):
|
| 256 |
+
xi, xv = x
|
| 257 |
+
if xi is not None and xv is not None:
|
| 258 |
+
xi_low, xi_high = self._linear_patch(x=xi, proj_type='image')
|
| 259 |
+
xv_low, xv_high = self._linear_patch(x=xv, proj_type='video')
|
| 260 |
+
|
| 261 |
+
low_x = torch.cat([xi_low, xv_low], dim=1)
|
| 262 |
+
high_x = torch.cat([xi_high, xv_high], dim=1)
|
| 263 |
+
elif xi is not None:
|
| 264 |
+
low_x, high_x = self._linear_patch(x=xi, proj_type='image')
|
| 265 |
+
elif xv is not None:
|
| 266 |
+
low_x, high_x = self._linear_patch(x=xv, proj_type='video')
|
| 267 |
+
|
| 268 |
+
return self._feature_transform(low_x=low_x, high_x=high_x)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
class Decoder_Arch(nn.Module):
|
| 273 |
+
def __init__(self,
|
| 274 |
+
l_dim = 128,
|
| 275 |
+
h_dim = 384,
|
| 276 |
+
sep_num_layer = 2,
|
| 277 |
+
fusion_num_layer = 4,
|
| 278 |
+
patch_size = (2,4,4),
|
| 279 |
+
in_channel = 3
|
| 280 |
+
):
|
| 281 |
+
super(Decoder_Arch, self).__init__()
|
| 282 |
+
|
| 283 |
+
self.l_dim = l_dim
|
| 284 |
+
self.is_first_chunk = True
|
| 285 |
+
self.tile_inference = False
|
| 286 |
+
|
| 287 |
+
self._build_linear_unpatch(in_channel=in_channel, out_channel_low=l_dim, out_channel_high=h_dim, pt=patch_size[0], ph=patch_size[1], pw=patch_size[2])
|
| 288 |
+
|
| 289 |
+
self.low_layer = ResNAF(num_layer=sep_num_layer, dim=l_dim)
|
| 290 |
+
self.high_layer = ResNAF(num_layer=sep_num_layer, dim=h_dim)
|
| 291 |
+
self.fusion_layer = ResNAF(num_layer=fusion_num_layer, dim=l_dim + h_dim)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def _build_linear_unpatch(self, in_channel = 3, out_channel_low = 128, out_channel_high = 384, pt = 2, ph = 4, pw = 4):
|
| 295 |
+
patch_config = {
|
| 296 |
+
'video_low': (pt, ph, pw),
|
| 297 |
+
'video_high': (pt, ph, pw),
|
| 298 |
+
'image_low': (1, ph, pw),
|
| 299 |
+
'image_high': (1, ph, pw)
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
for name, (t, h, w) in patch_config.items():
|
| 303 |
+
if 'low' in name:
|
| 304 |
+
out_dim = in_channel * t * h * w
|
| 305 |
+
in_dim = out_channel_low
|
| 306 |
+
else:
|
| 307 |
+
in_dim = out_channel_high
|
| 308 |
+
out_dim = in_channel * t * h * w * 7 if 'video' in name else in_channel * t * h * w * 3
|
| 309 |
+
proj = nn.Sequential(
|
| 310 |
+
nn.Linear(in_dim, out_dim),
|
| 311 |
+
Rearrange('b nt nh nw (c pt ph pw) -> b c (nt pt) (nh ph) (nw pw)', pt=t, ph=h, pw=w),
|
| 312 |
+
)
|
| 313 |
+
self.add_module(f"{name}_proj", proj)
|
| 314 |
+
|
| 315 |
+
def _linear_unpatch(self, x, proj_type):
|
| 316 |
+
low_comp, high_comp = getattr(self, f"{proj_type}_low_proj")(x[0]), getattr(self, f"{proj_type}_high_proj")(x[1])
|
| 317 |
+
return torch.cat([low_comp, high_comp], dim=1)
|
| 318 |
+
|
| 319 |
+
def _feature_transform(self, x):
|
| 320 |
+
x = self.fusion_layer(x)
|
| 321 |
+
low_x = self.low_layer(x[:,:,:,:,:self.l_dim])
|
| 322 |
+
high_x = self.high_layer(x[:,:,:,:,self.l_dim:])
|
| 323 |
+
|
| 324 |
+
return low_x, high_x
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
def decode(self, x, is_image = False):
|
| 328 |
+
low_x, high_x = self._feature_transform(x)
|
| 329 |
+
|
| 330 |
+
if is_image:
|
| 331 |
+
xi = self._linear_unpatch(x=(low_x, high_x), proj_type='image')
|
| 332 |
+
return (xi, None)
|
| 333 |
+
|
| 334 |
+
else:
|
| 335 |
+
if self.tile_inference and not self.is_first_chunk:
|
| 336 |
+
xv = self._linear_unpatch(x=(low_x, high_x), proj_type='video')
|
| 337 |
+
return (None, xv)
|
| 338 |
+
else:
|
| 339 |
+
xi = self._linear_unpatch(x=(low_x[:, :1], high_x[:, :1]), proj_type='image')
|
| 340 |
+
xv = self._linear_unpatch(x=(low_x[:, 1:], high_x[:, 1:]), proj_type='video')
|
| 341 |
+
return (xi, xv)
|
| 342 |
+
|
| 343 |
+
def forward(self, x):
|
| 344 |
+
low_x, high_x = self._feature_transform(x)
|
| 345 |
+
xi = self._linear_unpatch(x=(low_x[:, :1], high_x[:, :1]), proj_type='image')
|
| 346 |
+
xv = self._linear_unpatch(x=(low_x[:, 1:], high_x[:, 1:]), proj_type='video')
|
| 347 |
+
return (xi, xv)
|
| 348 |
+
|
| 349 |
+
class ISTA(nn.Module):
|
| 350 |
+
def __init__(self,
|
| 351 |
+
points_num = 512,
|
| 352 |
+
out_num = 4,
|
| 353 |
+
iter_num = 2,
|
| 354 |
+
layer_num = 2,
|
| 355 |
+
):
|
| 356 |
+
super(ISTA, self).__init__()
|
| 357 |
+
phi_init = np.random.normal(0.0, (1 / points_num) ** 0.5, size=(out_num, points_num))
|
| 358 |
+
self.phi = nn.Parameter(torch.from_numpy(phi_init).float(), requires_grad=True)
|
| 359 |
+
self.Q = nn.Parameter(torch.from_numpy(np.transpose(phi_init)).float(), requires_grad=True)
|
| 360 |
+
self.iter_num = iter_num
|
| 361 |
+
self.forward_l = nn.ModuleList()
|
| 362 |
+
self.backward_l = nn.ModuleList()
|
| 363 |
+
|
| 364 |
+
for _ in range(self.iter_num):
|
| 365 |
+
self.forward_l.append(NAF(num_layer=layer_num, dim=points_num))
|
| 366 |
+
self.backward_l.append(NAF(num_layer=layer_num, dim=points_num))
|
| 367 |
+
|
| 368 |
+
self.weights = nn.ParameterList()
|
| 369 |
+
self.etas = nn.ParameterList()
|
| 370 |
+
self.threshold = nn.ParameterList()
|
| 371 |
+
|
| 372 |
+
for _ in range(self.iter_num):
|
| 373 |
+
self.threshold.append(nn.Parameter(torch.Tensor([0.01]), requires_grad=True))
|
| 374 |
+
self.weights.append(nn.Parameter(torch.tensor(1.), requires_grad=True))
|
| 375 |
+
|
| 376 |
+
def sample(self, x):
|
| 377 |
+
b, t, h, w, d = x.shape
|
| 378 |
+
y = x.view(-1, d) @ self.phi.T
|
| 379 |
+
return y.view(b, t, h, w, -1)
|
| 380 |
+
|
| 381 |
+
def recon(self, y):
|
| 382 |
+
b, t, h, w, c = y.shape
|
| 383 |
+
y = y.reshape(-1, c)
|
| 384 |
+
recon = torch.mm(y, self.Q.t())
|
| 385 |
+
_, d = recon.shape
|
| 386 |
+
for i in range(self.iter_num):
|
| 387 |
+
recon_r = recon - self.weights[i] * torch.mm((torch.mm(recon, self.phi.t()) - y), self.phi)
|
| 388 |
+
recon = recon_r.reshape(b, t, h, w, -1)
|
| 389 |
+
recon = self.forward_l[i](recon)
|
| 390 |
+
recon = torch.mul(torch.sign(recon), F.relu(torch.abs(recon) - self.threshold[i]))
|
| 391 |
+
|
| 392 |
+
recon = self.backward_l[i](recon).view(-1, d)
|
| 393 |
+
recon = recon_r + recon
|
| 394 |
+
return recon.view(b, t, h, w, -1)
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
def forward(self, x):
|
| 398 |
+
y = self.sample(x)
|
| 399 |
+
recon = self.recon(y)
|
| 400 |
+
return recon
|
| 401 |
+
|
| 402 |
+
|
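A minimal usage sketch for backbones.py. The import path and input layout below are assumptions inferred from _build_linear_patch: with the default in_channel=3, the image branch expects 3 low plus 9 high stacked subband channels (12 total) and the video branch 3 plus 21 (24 total), e.g. wavelet subbands stacked on the channel dimension.

# Smoke-test sketch, assuming the package is importable as LeanVAE (hypothetical path).
import torch
from LeanVAE.modules.backbones import Encoder_Arch, Decoder_Arch, ISTA

enc = Encoder_Arch()                    # defaults: l_dim=128, h_dim=384, patch_size=(2, 4, 4)
dec = Decoder_Arch()
ista = ISTA(points_num=512, out_num=4)  # 512 matches the default l_dim + h_dim

xi = torch.randn(1, 12, 1, 16, 16)      # "image" subbands: 1 temporal slice
xv = torch.randn(1, 24, 4, 16, 16)      # "video" subbands: 4 frames (divisible by pt=2)

feats = enc((xi, xv))                   # -> (1, 3, 4, 4, 512): time 1 + 4/2, space /4
lat = ista.sample(feats)                # compress the last dim 512 -> 4
rec = ista.recon(lat)                   # unrolled ISTA reconstruction back to 512
xi_rec, xv_rec = dec(rec)               # -> (1, 12, 1, 16, 16) and (1, 24, 4, 16, 16)
print(feats.shape, lat.shape, xi_rec.shape, xv_rec.shape)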
Abnormal-CT-Generation-Healthy/LeanVAE/modules/cache/vgg.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a78928a0af1e5f0fcb1f3b9e8f8c3a2a5a3de244d830ad5c1feddc79b8432868
size 7289
Abnormal-CT-Generation-Healthy/LeanVAE/modules/discriminator.py
ADDED
@@ -0,0 +1,130 @@
from typing import Any, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
import functools

class ActNorm(nn.Module):
    def __init__(self, num_features, logdet=False, affine=True, allow_reverse_init=False):
        assert affine
        super().__init__()
        self.logdet = logdet
        self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.allow_reverse_init = allow_reverse_init

        self.register_buffer("initialized", torch.tensor(0, dtype=torch.uint8))

    def initialize(self, input):
        with torch.no_grad():
            flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = flatten.mean(1).unsqueeze(1).unsqueeze(2).unsqueeze(3).permute(1, 0, 2, 3)
            std = flatten.std(1).unsqueeze(1).unsqueeze(2).unsqueeze(3).permute(1, 0, 2, 3)

            self.loc.data.copy_(-mean)
            self.scale.data.copy_(1 / (std + 1e-6))

    def forward(self, input, reverse=False):
        if reverse:
            return self.reverse(input)
        if len(input.shape) == 2:
            input = input[:, :, None, None]
            squeeze = True
        else:
            squeeze = False

        _, _, height, width = input.shape

        if self.training and self.initialized.item() == 0:
            self.initialize(input)
            self.initialized.fill_(1)

        h = self.scale * (input + self.loc)

        if squeeze:
            h = h.squeeze(-1).squeeze(-1)

        if self.logdet:
            log_abs = torch.log(torch.abs(self.scale))
            logdet = height * width * torch.sum(log_abs)
            logdet = logdet * torch.ones(input.shape[0]).to(input)
            return h, logdet

        return h

    def reverse(self, output):
        if self.training and self.initialized.item() == 0:
            if not self.allow_reverse_init:
                raise RuntimeError(
                    "Initializing ActNorm in reverse direction is "
                    "disabled by default. Use allow_reverse_init=True to enable."
                )
            else:
                self.initialize(output)
                self.initialized.fill_(1)

        if len(output.shape) == 2:
            output = output[:, :, None, None]
            squeeze = True
        else:
            squeeze = False

        h = output / self.scale - self.loc

        if squeeze:
            h = h.squeeze(-1).squeeze(-1)
        return h


class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator as in Pix2Pix."""
    # https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
    def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int) -- the number of channels in input images
            ndf (int)      -- the number of filters in the last conv layer
            n_layers (int) -- the number of conv layers in the discriminator
        """
        super(NLayerDiscriminator, self).__init__()
        if not use_actnorm:
            norm_layer = nn.BatchNorm2d
        else:
            norm_layer = ActNorm
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d

        kw = 4
        padw = 1
        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True),
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True),
        ]

        sequence += [
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]  # output 1 channel prediction map
        self.main = nn.Sequential(*sequence)

    def forward(self, input):
        """Standard forward."""
        return self.main(input)
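A quick usage sketch for the PatchGAN discriminator above, assuming the hypothetical import path below; it maps an image to a 1-channel logit map with one score per overlapping patch.

import torch
from LeanVAE.modules.discriminator import NLayerDiscriminator  # hypothetical import path

disc = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
fake = torch.randn(2, 3, 128, 128)
logits = disc(fake)        # -> (2, 1, 14, 14) for a 128x128 input
print(logits.shape)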
Abnormal-CT-Generation-Healthy/LeanVAE/modules/lpips.py
ADDED
@@ -0,0 +1,230 @@
"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""

import os, hashlib
import requests
from tqdm import tqdm

import torch
import torch.nn as nn
from torchvision import models
from collections import namedtuple
import torchvision
URL_MAP = {
    "vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1"
}

CKPT_MAP = {
    "vgg_lpips": "vgg.pth"
}

MD5_MAP = {
    "vgg_lpips": "d507d7349b931f0638a25a48a722f98a"
}

def download(url, local_path, chunk_size=1024):
    os.makedirs(os.path.split(local_path)[0], exist_ok=True)
    with requests.get(url, stream=True) as r:
        total_size = int(r.headers.get("content-length", 0))
        with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
            with open(local_path, "wb") as f:
                for data in r.iter_content(chunk_size=chunk_size):
                    if data:
                        f.write(data)
                        pbar.update(chunk_size)


def md5_hash(path):
    with open(path, "rb") as f:
        content = f.read()
    return hashlib.md5(content).hexdigest()


def get_ckpt_path(name, root, check=False):
    assert name in URL_MAP
    path = os.path.join(root, CKPT_MAP[name])
    if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]):
        print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
        download(URL_MAP[name], path)
        md5 = md5_hash(path)
        assert md5 == MD5_MAP[name], md5
    return path


class LPIPS(nn.Module):
    # Learned perceptual metric
    def __init__(self, use_dropout=True):
        super().__init__()
        self.scaling_layer = ScalingLayer()
        self.chns = [64, 128, 256, 512, 512]  # vgg16 features
        self.net = vgg16(pretrained=True, requires_grad=False)
        self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
        self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
        self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
        self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
        self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
        self.load_from_pretrained()
        for param in self.parameters():
            param.requires_grad = False

    def load_from_pretrained(self, name="vgg_lpips"):
        ckpt = get_ckpt_path(name, os.path.join(os.path.dirname(os.path.abspath(__file__)), "cache"))
        self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
        print("loaded pretrained LPIPS loss from {}".format(ckpt))

    @classmethod
    def from_pretrained(cls, name="vgg_lpips"):
        if name != "vgg_lpips":
            raise NotImplementedError
        model = cls()
        ckpt = get_ckpt_path(name, os.path.join(os.path.dirname(os.path.abspath(__file__)), "cache"))
        r = model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
        print(r)
        return model

    def forward(self, input, target):
        in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
        outs0, outs1 = self.net(in0_input), self.net(in1_input)
        feats0, feats1, diffs = {}, {}, {}
        lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
        for kk in range(len(self.chns)):
            feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
            diffs[kk] = (feats0[kk] - feats1[kk]) ** 2

        res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
        val = res[0]
        for l in range(1, len(self.chns)):
            # print(res[l].shape)
            val += res[l]

        return val


class ScalingLayer(nn.Module):
    def __init__(self):
        super(ScalingLayer, self).__init__()
        self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
        self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])

    def forward(self, inp):
        return (inp - self.shift) / self.scale


class NetLinLayer(nn.Module):
    """ A single linear layer which does a 1x1 conv """
    def __init__(self, chn_in, chn_out=1, use_dropout=False):
        super(NetLinLayer, self).__init__()
        layers = [nn.Dropout(), ] if (use_dropout) else []
        layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
        self.model = nn.Sequential(*layers)


class vgg16(torch.nn.Module):
    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        self.N_slices = 5
        for x in range(4):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(4, 9):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(9, 16):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(16, 23):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(23, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h = self.slice1(X)
        h_relu1_2 = h
        h = self.slice2(h)
        h_relu2_2 = h
        h = self.slice3(h)
        h_relu3_3 = h
        h = self.slice4(h)
        h_relu4_3 = h
        h = self.slice5(h)
        h_relu5_3 = h
        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
        out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
        return out


def normalize_tensor(x, eps=1e-10):
    norm_factor = torch.sqrt(torch.sum(x**2, dim=1, keepdim=True))
    return x / (norm_factor + eps)


def spatial_average(x, keepdim=True):
    return x.mean([2, 3], keepdim=keepdim)


class ResNetLPIPS(nn.Module):
    # Learned perceptual metric
    def __init__(self, use_dropout=True):
        super().__init__()
        import clip  # not imported at module level; this class requires the OpenAI CLIP package
        net, _ = clip.load(device='cpu', name='RN50')
        self.net = net.visual
        self.net.attnpool = nn.Identity()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, input, target):

        outs0, outs1 = self.net(input), self.net(target)
        # feats0, feats1 = normalize_tensor(outs0), normalize_tensor(outs1)
        diffs = ((outs0 - outs1) ** 2)  # (feats0 - feats1) ** 2

        return diffs


class MeanShift(nn.Conv2d):
    def __init__(self, data_mean, data_std, data_range=1, norm=True):
        c = len(data_mean)
        super(MeanShift, self).__init__(c, c, kernel_size=1)
        std = torch.Tensor(data_std)
        self.weight.data = torch.eye(c).view(c, c, 1, 1)
        if norm:
            self.weight.data.div_(std.view(c, 1, 1, 1))
            self.bias.data = -1 * data_range * torch.Tensor(data_mean)
            self.bias.data.div_(std)
        else:
            self.weight.data.mul_(std.view(c, 1, 1, 1))
            self.bias.data = data_range * torch.Tensor(data_mean)
        self.requires_grad = False

class VGGPerceptualLoss(torch.nn.Module):
    def __init__(self, rank):
        super(VGGPerceptualLoss, self).__init__()
        blocks = []
        pretrained = True
        self.vgg_pretrained_features = models.vgg19(pretrained=pretrained).features
        self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).to(rank)
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, Y, X, indices=None):
        X = self.normalize(X)
        Y = self.normalize(Y)
        indices = [2, 7, 12, 21, 30]
        weights = [1.0/2.6, 1.0/4.8, 1.0/3.7, 1.0/5.6, 10/1.5]
        k = 0
        loss = 0
        for i in range(indices[-1]):
            X = self.vgg_pretrained_features[i](X)
            Y = self.vgg_pretrained_features[i](Y)
            if (i+1) in indices:
                loss += weights[k] * (X - Y.detach()).abs().mean() * 0.1
                k += 1
        return loss
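A usage sketch for the LPIPS loss, assuming the hypothetical import path below and that the torchvision VGG16 weights plus the vgg.pth linear-head checkpoint (bundled above under modules/cache/) are available; inputs are assumed to lie roughly in [-1, 1], per the ScalingLayer constants.

import torch
from LeanVAE.modules.lpips import LPIPS  # hypothetical import path

lpips = LPIPS().eval()
x_rec = torch.rand(2, 3, 64, 64) * 2 - 1   # reconstructions in [-1, 1]
x_ref = torch.rand(2, 3, 64, 64) * 2 - 1   # targets in [-1, 1]
with torch.no_grad():
    d = lpips(x_rec, x_ref)                # -> (2, 1, 1, 1) per-sample perceptual distances
print(d.flatten())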
Abnormal-CT-Generation-Healthy/LeanVAE/modules/vae.py
ADDED
@@ -0,0 +1,73 @@
import torch
import numpy as np

class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = parameters  # torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(self.mean).to(device=self.mean.device)

    def sample(self):
        x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.mean.device)
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2)
                                       + self.var - 1.0 - self.logvar,
                                       dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var - 1.0 - self.logvar + other.logvar,
                    dim=[1, 2, 3])

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        return self.mean


def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.
    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    tensor = None
    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, torch.Tensor):
            tensor = obj
            break
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    logvar1, logvar2 = [
        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
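A short sketch of the posterior helper in vae.py, assuming the hypothetical import path below; note the constructor takes a (mean, logvar) tuple rather than a single chunked tensor, and kl() sums over dims 1-3, so 4D latents are expected.

import torch
from LeanVAE.modules.vae import DiagonalGaussianDistribution  # hypothetical import path

mean = torch.zeros(2, 4, 8, 8)
logvar = torch.zeros(2, 4, 8, 8)
posterior = DiagonalGaussianDistribution((mean, logvar))
z = posterior.sample()    # mean + std * noise, same shape as mean
kl = posterior.kl()       # -> (2,); zero here since mean=0 and var=1
print(z.shape, kl)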
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__init__.py
ADDED
File without changes
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (202 Bytes).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (196 Bytes).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (200 Bytes).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/gan_loss.cpython-310.pyc
ADDED
Binary file (4.92 kB).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/gan_loss.cpython-311.pyc
ADDED
Binary file (8.93 kB).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/gan_loss.cpython-39.pyc
ADDED
Binary file (4.87 kB).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/patcher_utils.cpython-310.pyc
ADDED
Binary file (5.99 kB).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/patcher_utils.cpython-311.pyc
ADDED
Binary file (16.6 kB).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/patcher_utils.cpython-39.pyc
ADDED
Binary file (6.09 kB).
Abnormal-CT-Generation-Healthy/LeanVAE/utils/__pycache__/video_utils.cpython-310.pyc
ADDED
Binary file (18.5 kB).