Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +477 -0
- MRI_recon/code/Frequency-Diffusion/.gitignore +142 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/LICENSE +201 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/README.md +97 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/__init__.py +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/bash/brats.sh +46 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/bash/fastmri.sh +59 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/bash/fastmri_8x.sh +52 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/bash/m4raw.sh +149 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/BRATS_DuDo_dataloader.py +295 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/BRATS_dataloader.py +174 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/BRATS_dataloader_new.py +384 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/BRATS_kspace_dataloader.py +298 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__init__.py +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/__init__.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/albu_transform.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/fastmri.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/kspace_subsample.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/m4_utils.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/m4raw_dataloader.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/m4raw_std_dataloader.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/math.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/subsample.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/transforms.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/albu_transform.py +75 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/brats_4X_mask.npy +3 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/brats_8X_mask.npy +3 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/brats_data_gen.py +302 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/kspace_4_mask.npy +3 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/kspace_8_mask.npy +3 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/m4raw_4_mask.npy +3 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/fastmri.py +339 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/hybrid_sparse.py +156 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/kspace_subsample.py +328 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/m4_utils.py +272 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/m4raw_dataloader.py +574 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/m4raw_std_dataloader.py +583 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/math.py +231 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/subsample.py +195 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/transforms.py +493 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/debug/True_0_0.png +3 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/documents/INSTALL.md +11 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/__init__.py +2 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/__pycache__/__init__.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/__pycache__/frequency_noise.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/__init__.py +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/__pycache__/__init__.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/__pycache__/k_degradation.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/__pycache__/mask_utils.cpython-310.pyc +0 -0
- MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/extract_example_mask.py +71 -0
.gitattributes
CHANGED
|
@@ -10081,3 +10081,480 @@ var/VAR_dd/visualize/attn_score/15/class_980/seed_1/raw_map_map_6.jpg filter=lfs
|
|
| 10081 |
var/VAR_dd/visualize/attn_score/15/class_980/seed_1/raw_map_map_7.jpg filter=lfs diff=lfs merge=lfs -text
|
| 10082 |
var/VAR_dd/visualize/attn_score/15/class_980/seed_1/raw_map_map_8.jpg filter=lfs diff=lfs merge=lfs -text
|
| 10083 |
var/VAR_dd/visualize/attn_score/15/class_980/seed_1/raw_map_map_9.jpg filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10081 |
var/VAR_dd/visualize/attn_score/15/class_980/seed_1/raw_map_map_7.jpg filter=lfs diff=lfs merge=lfs -text
|
| 10082 |
var/VAR_dd/visualize/attn_score/15/class_980/seed_1/raw_map_map_8.jpg filter=lfs diff=lfs merge=lfs -text
|
| 10083 |
var/VAR_dd/visualize/attn_score/15/class_980/seed_1/raw_map_map_9.jpg filter=lfs diff=lfs merge=lfs -text
|
| 10084 |
+
MRI_recon/code/Frequency-Diffusion/FSMNet/debug/True_0_0.png filter=lfs diff=lfs merge=lfs -text
|
| 10085 |
+
MRI_recon/code/Frequency-Diffusion/experiments/FSMNet/figures/FSMNet.png filter=lfs diff=lfs merge=lfs -text
|
| 10086 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/104150-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10087 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/104150-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10088 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/104150-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10089 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/104150-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10090 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/104150-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10091 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/104150-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10092 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/108316-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10093 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/108316-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10094 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/108316-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10095 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/108316-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10096 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/108316-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10097 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/108316-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10098 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/112482-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10099 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/112482-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10100 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/112482-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10101 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/112482-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10102 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/112482-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10103 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/112482-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10104 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/116648-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10105 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/116648-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10106 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/116648-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10107 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/116648-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10108 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/116648-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10109 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/116648-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10110 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/120814-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10111 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/120814-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10112 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/120814-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10113 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/120814-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10114 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/120814-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10115 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/120814-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10116 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/12498-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10117 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/12498-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10118 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/12498-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10119 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/12498-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10120 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/12498-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10121 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/12498-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10122 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/124980-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10123 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/124980-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10124 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/124980-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10125 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/124980-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10126 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/124980-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10127 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/124980-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10128 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/129146-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10129 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/129146-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10130 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/129146-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10131 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/129146-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10132 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/129146-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10133 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/129146-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10134 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/133312-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10135 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/133312-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10136 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/133312-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10137 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/133312-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10138 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/133312-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10139 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/133312-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10140 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/137478-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10141 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/137478-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10142 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/137478-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10143 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/137478-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10144 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/137478-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10145 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/137478-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10146 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/141644-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10147 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/141644-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10148 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/141644-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10149 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/141644-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10150 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/141644-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10151 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/141644-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10152 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/145810-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10153 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/145810-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10154 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/145810-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10155 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/145810-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10156 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/145810-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10157 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/145810-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10158 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/149976-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10159 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/149976-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10160 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/149976-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10161 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/149976-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10162 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/149976-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10163 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/149976-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10164 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/154142-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10165 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/154142-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10166 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/154142-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10167 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/154142-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10168 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/154142-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10169 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/154142-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10170 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/158308-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10171 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/158308-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10172 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/158308-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10173 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/158308-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10174 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/158308-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10175 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/158308-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10176 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/162474-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10177 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/162474-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10178 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/162474-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10179 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/162474-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10180 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/162474-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10181 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/162474-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10182 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/16664-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10183 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/16664-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10184 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/16664-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10185 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/16664-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10186 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/16664-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10187 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/16664-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10188 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/20830-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10189 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/20830-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10190 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/20830-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10191 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/20830-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10192 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/20830-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10193 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/20830-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10194 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/24996-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10195 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/24996-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10196 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/24996-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10197 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/24996-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10198 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/24996-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10199 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/24996-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10200 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/29162-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10201 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/29162-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10202 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/29162-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10203 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/29162-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10204 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/29162-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10205 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/29162-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10206 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/33328-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10207 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/33328-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10208 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/33328-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10209 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/33328-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10210 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/33328-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10211 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/33328-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10212 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/37494-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10213 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/37494-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10214 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/37494-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10215 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/37494-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10216 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/37494-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10217 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/37494-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10218 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/4166-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10219 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/4166-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10220 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/4166-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10221 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/4166-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10222 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/4166-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10223 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/4166-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10224 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/41660-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10225 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/41660-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10226 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/41660-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10227 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/41660-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10228 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/41660-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10229 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/41660-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10230 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/45826-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10231 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/45826-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10232 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/45826-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10233 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/45826-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10234 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/45826-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10235 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/45826-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10236 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/49992-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10237 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/49992-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10238 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/49992-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10239 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/49992-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10240 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/49992-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10241 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/49992-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10242 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/54158-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10243 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/54158-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10244 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/54158-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10245 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/54158-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10246 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/54158-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10247 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/54158-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10248 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/58324-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10249 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/58324-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10250 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/58324-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10251 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/58324-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10252 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/58324-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10253 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/58324-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10254 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/62490-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10255 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/62490-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10256 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/62490-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10257 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/62490-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10258 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/62490-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10259 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/62490-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10260 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/66656-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10261 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/66656-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10262 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/66656-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10263 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/66656-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10264 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/66656-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10265 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/66656-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10266 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/70822-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10267 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/70822-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10268 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/70822-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10269 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/70822-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10270 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/70822-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10271 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/70822-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10272 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/74988-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10273 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/74988-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10274 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/74988-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10275 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/74988-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10276 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/74988-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10277 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/74988-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10278 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/79154-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10279 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/79154-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10280 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/79154-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10281 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/79154-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10282 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/79154-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10283 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/79154-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10284 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/8332-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10285 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/8332-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10286 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/8332-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10287 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/8332-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10288 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/8332-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10289 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/8332-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10290 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/83320-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10291 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/83320-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10292 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/83320-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10293 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/83320-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10294 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/83320-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10295 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/83320-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10296 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/87486-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10297 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/87486-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10298 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/87486-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10299 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/87486-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10300 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/87486-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10301 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/87486-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10302 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/91652-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10303 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/91652-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10304 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/91652-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10305 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/91652-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10306 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/91652-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10307 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/91652-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10308 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/95818-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10309 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/95818-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10310 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/95818-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10311 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/95818-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10312 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/95818-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10313 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/95818-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10314 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/99984-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10315 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/99984-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10316 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/99984-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10317 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/99984-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10318 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/99984-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10319 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_4X/99984-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10320 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/104150-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10321 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/104150-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10322 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/104150-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10323 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/104150-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10324 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/104150-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10325 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/104150-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10326 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/108316-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10327 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/108316-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10328 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/108316-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10329 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/108316-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10330 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/108316-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10331 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/108316-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10332 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/112482-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10333 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/112482-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10334 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/112482-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10335 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/112482-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10336 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/112482-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10337 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/112482-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10338 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/116648-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10339 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/116648-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10340 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/116648-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10341 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/116648-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10342 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/116648-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10343 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/116648-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10344 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/120814-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10345 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/120814-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10346 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/120814-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10347 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/120814-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10348 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/120814-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10349 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/120814-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10350 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/12498-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10351 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/12498-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10352 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/12498-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10353 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/12498-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10354 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/12498-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10355 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/12498-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10356 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/124980-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10357 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/124980-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10358 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/124980-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10359 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/124980-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10360 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/124980-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10361 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/124980-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10362 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/129146-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10363 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/129146-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10364 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/129146-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10365 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/129146-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10366 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/129146-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10367 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/129146-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10368 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/133312-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10369 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/133312-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10370 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/133312-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10371 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/133312-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10372 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/133312-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10373 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/133312-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10374 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/137478-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10375 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/137478-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10376 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/137478-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10377 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/137478-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10378 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/137478-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10379 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/137478-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10380 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/141644-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10381 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/141644-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10382 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/141644-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10383 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/141644-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10384 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/141644-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10385 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/141644-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10386 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/145810-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10387 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/145810-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10388 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/145810-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10389 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/145810-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10390 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/145810-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10391 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/145810-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10392 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/149976-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10393 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/149976-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10394 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/149976-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10395 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/149976-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10396 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/149976-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10397 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/149976-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10398 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/154142-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10399 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/154142-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10400 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/154142-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10401 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/154142-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10402 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/154142-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10403 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/154142-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10404 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/158308-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10405 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/158308-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10406 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/158308-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10407 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/158308-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10408 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/158308-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10409 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/158308-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10410 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/162474-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10411 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/162474-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10412 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/162474-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10413 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/162474-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10414 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/162474-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10415 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/162474-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10416 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/16664-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10417 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/16664-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10418 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/16664-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10419 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/16664-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10420 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/16664-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10421 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/16664-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10422 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/166640-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10423 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/166640-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10424 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/166640-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10425 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/166640-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10426 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/166640-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10427 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/166640-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10428 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/20830-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10429 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/20830-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10430 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/20830-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10431 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/20830-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10432 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/20830-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10433 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/20830-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10434 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/24996-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10435 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/24996-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10436 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/24996-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10437 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/24996-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10438 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/24996-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10439 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/24996-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10440 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/29162-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10441 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/29162-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10442 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/29162-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10443 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/29162-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10444 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/29162-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10445 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/29162-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10446 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/33328-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10447 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/33328-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10448 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/33328-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10449 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/33328-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10450 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/33328-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10451 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/33328-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10452 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/37494-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10453 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/37494-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10454 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/37494-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10455 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/37494-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10456 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/37494-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10457 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/37494-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10458 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/4166-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10459 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/4166-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10460 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/4166-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10461 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/4166-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10462 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/4166-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10463 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/4166-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10464 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/41660-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10465 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/41660-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10466 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/41660-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10467 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/41660-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10468 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/41660-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10469 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/41660-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10470 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/45826-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10471 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/45826-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10472 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/45826-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10473 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/45826-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10474 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/45826-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10475 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/45826-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10476 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/49992-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10477 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/49992-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10478 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/49992-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10479 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/49992-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10480 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/49992-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10481 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/49992-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10482 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/54158-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10483 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/54158-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10484 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/54158-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10485 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/54158-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10486 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/54158-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10487 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/54158-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10488 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/58324-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10489 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/58324-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10490 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/58324-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10491 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/58324-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10492 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/58324-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10493 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/58324-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10494 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/62490-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10495 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/62490-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10496 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/62490-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10497 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/62490-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10498 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/62490-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10499 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/62490-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10500 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/66656-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10501 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/66656-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10502 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/66656-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10503 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/66656-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10504 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/66656-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10505 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/66656-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10506 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/70822-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10507 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/70822-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10508 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/70822-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10509 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/70822-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10510 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/70822-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10511 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/70822-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10512 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/74988-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10513 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/74988-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10514 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/74988-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10515 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/74988-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10516 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/74988-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10517 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/74988-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10518 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/79154-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10519 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/79154-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10520 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/79154-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10521 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/79154-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10522 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/79154-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10523 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/79154-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10524 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/8332-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10525 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/8332-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10526 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/8332-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10527 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/8332-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10528 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/8332-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10529 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/8332-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10530 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/83320-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10531 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/83320-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10532 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/83320-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10533 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/83320-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10534 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/83320-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10535 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/83320-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10536 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/87486-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10537 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/87486-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10538 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/87486-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10539 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/87486-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10540 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/87486-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10541 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/87486-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10542 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/91652-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10543 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/91652-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10544 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/91652-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10545 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/91652-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10546 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/91652-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10547 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/91652-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10548 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/95818-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10549 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/95818-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10550 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/95818-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10551 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/95818-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10552 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/95818-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10553 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/95818-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10554 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/99984-0-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10555 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/99984-0-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10556 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/99984-1-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10557 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/99984-1-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10558 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/99984-2-compare.png filter=lfs diff=lfs merge=lfs -text
|
| 10559 |
+
MRI_recon/new_code/Frequency-Diffusion-main/FSMNet/image_results/fastmri_8X/99984-2-middle.png filter=lfs diff=lfs merge=lfs -text
|
| 10560 |
+
MRI_recon/new_code/Frequency-Diffusion-main/experiments/FSMNet/figures/FSMNet.png filter=lfs diff=lfs merge=lfs -text
|
MRI_recon/code/Frequency-Diffusion/.gitignore
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generation results
|
| 2 |
+
results/
|
| 3 |
+
|
| 4 |
+
# Byte-compiled / optimized / DLL files
|
| 5 |
+
__pycache__/
|
| 6 |
+
*.py[cod]
|
| 7 |
+
*$py.class
|
| 8 |
+
|
| 9 |
+
# C extensions
|
| 10 |
+
*.so
|
| 11 |
+
|
| 12 |
+
# Distribution / packaging
|
| 13 |
+
.Python
|
| 14 |
+
build/
|
| 15 |
+
develop-eggs/
|
| 16 |
+
dist/
|
| 17 |
+
downloads/
|
| 18 |
+
eggs/
|
| 19 |
+
.eggs/
|
| 20 |
+
lib/
|
| 21 |
+
lib64/
|
| 22 |
+
parts/
|
| 23 |
+
sdist/
|
| 24 |
+
var/
|
| 25 |
+
wheels/
|
| 26 |
+
pip-wheel-metadata/
|
| 27 |
+
share/python-wheels/
|
| 28 |
+
*.egg-info/
|
| 29 |
+
.installed.cfg
|
| 30 |
+
*.egg
|
| 31 |
+
MANIFEST
|
| 32 |
+
|
| 33 |
+
# PyInstaller
|
| 34 |
+
# Usually these files are written by a python script from a template
|
| 35 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 36 |
+
*.manifest
|
| 37 |
+
*.spec
|
| 38 |
+
|
| 39 |
+
log./
|
| 40 |
+
log.txt
|
| 41 |
+
.log
|
| 42 |
+
|
| 43 |
+
# Installer logs
|
| 44 |
+
pip-log.txt
|
| 45 |
+
pip-delete-this-directory.txt
|
| 46 |
+
|
| 47 |
+
# Unit test / coverage reports
|
| 48 |
+
htmlcov/
|
| 49 |
+
.tox/
|
| 50 |
+
.nox/
|
| 51 |
+
.coverage
|
| 52 |
+
.coverage.*
|
| 53 |
+
.cache
|
| 54 |
+
nosetests.xml
|
| 55 |
+
coverage.xml
|
| 56 |
+
*.cover
|
| 57 |
+
*.py,cover
|
| 58 |
+
.hypothesis/
|
| 59 |
+
.pytest_cache/
|
| 60 |
+
|
| 61 |
+
*.png
|
| 62 |
+
*.pth
|
| 63 |
+
# Translations
|
| 64 |
+
*.mo
|
| 65 |
+
*.pot
|
| 66 |
+
|
| 67 |
+
# Django stuff:
|
| 68 |
+
*.log
|
| 69 |
+
log/
|
| 70 |
+
local_settings.py
|
| 71 |
+
db.sqlite3
|
| 72 |
+
db.sqlite3-journal
|
| 73 |
+
|
| 74 |
+
# Flask stuff:
|
| 75 |
+
instance/
|
| 76 |
+
.webassets-cache
|
| 77 |
+
|
| 78 |
+
# Scrapy stuff:
|
| 79 |
+
.scrapy
|
| 80 |
+
|
| 81 |
+
# Sphinx documentation
|
| 82 |
+
docs/_build/
|
| 83 |
+
|
| 84 |
+
# PyBuilder
|
| 85 |
+
target/
|
| 86 |
+
|
| 87 |
+
# Jupyter Notebook
|
| 88 |
+
.ipynb_checkpoints
|
| 89 |
+
|
| 90 |
+
# IPython
|
| 91 |
+
profile_default/
|
| 92 |
+
ipython_config.py
|
| 93 |
+
|
| 94 |
+
# pyenv
|
| 95 |
+
.python-version
|
| 96 |
+
|
| 97 |
+
# pipenv
|
| 98 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 99 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 100 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 101 |
+
# install all needed dependencies.
|
| 102 |
+
#Pipfile.lock
|
| 103 |
+
|
| 104 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
| 105 |
+
__pypackages__/
|
| 106 |
+
|
| 107 |
+
# Celery stuff
|
| 108 |
+
celerybeat-schedule
|
| 109 |
+
celerybeat.pid
|
| 110 |
+
|
| 111 |
+
# SageMath parsed files
|
| 112 |
+
*.sage.py
|
| 113 |
+
|
| 114 |
+
# Environments
|
| 115 |
+
.env
|
| 116 |
+
.venv
|
| 117 |
+
env/
|
| 118 |
+
venv/
|
| 119 |
+
ENV/
|
| 120 |
+
env.bak/
|
| 121 |
+
venv.bak/
|
| 122 |
+
|
| 123 |
+
# Spyder project settings
|
| 124 |
+
.spyderproject
|
| 125 |
+
.spyproject
|
| 126 |
+
|
| 127 |
+
# Rope project settings
|
| 128 |
+
.ropeproject
|
| 129 |
+
|
| 130 |
+
# mkdocs documentation
|
| 131 |
+
/site
|
| 132 |
+
|
| 133 |
+
# mypy
|
| 134 |
+
.mypy_cache/
|
| 135 |
+
.dmypy.json
|
| 136 |
+
dmypy.json
|
| 137 |
+
|
| 138 |
+
# Pyre type checker
|
| 139 |
+
.pyre/
|
| 140 |
+
.DS_Store
|
| 141 |
+
.idea/
|
| 142 |
+
apex
|
MRI_recon/code/Frequency-Diffusion/FSMNet/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
MRI_recon/code/Frequency-Diffusion/FSMNet/README.md
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FSMNet
|
| 2 |
+
FSMNet efficiently explores global dependencies across different modalities. Specifically, the features for each modality are extracted by the Frequency-Spatial Feature Extraction (FSFE) module, featuring a frequency branch and a spatial branch. Benefiting from the global property of the Fourier transform, the frequency branch can efficiently capture global dependency with an image-size receptive field, while the spatial branch can extract local features. To exploit complementary information from the auxiliary modality, we propose a Cross-Modal Selective fusion (CMS-fusion) module that selectively incorporate the frequency and spatial features from the auxiliary modality to enhance the corresponding branch of the target modality. To further integrate the enhanced global features from the frequency branch and the enhanced local features from the spatial branch, we develop a Frequency-Spatial fusion (FS-fusion) module, resulting in a comprehensive feature representation for the target modality.
|
| 3 |
+
|
| 4 |
+
<p align="center"><img width="100%" src="figures/FSMNet.png" /></p>
|
| 5 |
+
|
| 6 |
+
## Paper
|
| 7 |
+
|
| 8 |
+
<b>Accelerated Multi-Contrast MRI Reconstruction via Frequency and Spatial Mutual Learning</b> <br/>
|
| 9 |
+
[Qi Chen](https://scholar.google.com/citations?user=4Q5gs2MAAAAJ&hl=en)<sup>1</sup>, [Xiaohan Xing](https://hathawayxxh.github.io/)<sup>2, *</sup>, [Zhen Chen](https://franciszchen.github.io/)<sup>3</sup>, [Zhiwei Xiong](http://staff.ustc.edu.cn/~zwxiong/)<sup>1</sup> <br/>
|
| 10 |
+
<sup>1 </sup>University of Science and Technology of China, <br/>
|
| 11 |
+
<sup>2 </sup>Stanford University, <br/>
|
| 12 |
+
<sup>3 </sup>Centre for Artificial Intelligence and Robotics (CAIR), HKISI-CAS <br/>
|
| 13 |
+
MICCAI, 2024 <br/>
|
| 14 |
+
[paper](http://arxiv.org/abs/2409.14113) | [code](https://github.com/qic999/FSMNet) | [huggingface](https://huggingface.co/datasets/qicq1c/MRI_Reconstruction)
|
| 15 |
+
|
| 16 |
+
## 0. Installation
|
| 17 |
+
|
| 18 |
+
```bash
|
| 19 |
+
git clone https://github.com/qic999/FSMNet.git
|
| 20 |
+
cd FSMNet
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
See [installation instructions](documents/INSTALL.md) to create an environment and obtain requirements.
|
| 24 |
+
|
| 25 |
+
## 1. Prepare datasets
|
| 26 |
+
Download BraTS dataset and fastMRI dataset and save them to the `datapath` directory.
|
| 27 |
+
```
|
| 28 |
+
cd $datapath
|
| 29 |
+
# download brats dataset
|
| 30 |
+
wget https://huggingface.co/datasets/qicq1c/MRI_Reconstruction/resolve/main/BRATS_100patients.zip
|
| 31 |
+
unzip BRATS_100patients.zip
|
| 32 |
+
# download fastmri dataset
|
| 33 |
+
wget https://huggingface.co/datasets/qicq1c/MRI_Reconstruction/resolve/main/singlecoil_train_selected.zip
|
| 34 |
+
unzip singlecoil_train_selected.zip
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
## 2. Training
|
| 38 |
+
##### BraTS dataset, AF=4
|
| 39 |
+
```
|
| 40 |
+
python train_brats.py --root_path /data/qic99/MRI_recon image_100patients_4X/ \
|
| 41 |
+
--gpu 0 --batch_size 4 --base_lr 0.0001 --MRIDOWN 4X --low_field_SNR 0 \
|
| 42 |
+
--input_normalize mean_std \
|
| 43 |
+
--exp FSMNet_BraTS_4x
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
##### BraTS dataset, AF=8
|
| 47 |
+
```
|
| 48 |
+
python train_brats.py --root_path /data/qic99/MRI_recon/image_100patients_8X/ \
|
| 49 |
+
--gpu 1 --batch_size 4 --base_lr 0.0001 --MRIDOWN 8X --low_field_SNR 0 \
|
| 50 |
+
--input_normalize mean_std \
|
| 51 |
+
--exp FSMNet_BraTS_8x
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
##### fastMRI dataset, AF=4
|
| 55 |
+
```
|
| 56 |
+
python train_fastmri.py --root_path /data/qic99/MRI_recon/fastMRI/ \
|
| 57 |
+
--gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 58 |
+
--exp FSMNet_fastmri_4x
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
##### fastMRI dataset, AF=8
|
| 62 |
+
```
|
| 63 |
+
python train_fastmri.py --root_path /data/qic99/MRI_recon/fastMRI/ \
|
| 64 |
+
--gpu 1 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 65 |
+
--exp FSMNet_fastmri_8x
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
## 3. Testing
|
| 69 |
+
##### BraTS dataset, AF=4
|
| 70 |
+
```
|
| 71 |
+
python test_brats.py --root_path /data/qic99/MRI_recon/image_100patients_4X/ \
|
| 72 |
+
--gpu 3 --base_lr 0.0001 --MRIDOWN 4X --low_field_SNR 0 \
|
| 73 |
+
--input_normalize mean_std \
|
| 74 |
+
--exp FSMNet_BraTS_4x --phase test
|
| 75 |
+
```
|
| 76 |
+
|
| 77 |
+
##### BraTS dataset, AF=8
|
| 78 |
+
```
|
| 79 |
+
python test_brats.py --root_path /data/qic99/MRI_recon/image_100patients_8X/ \
|
| 80 |
+
--gpu 4 --base_lr 0.0001 --MRIDOWN 8X --low_field_SNR 0 \
|
| 81 |
+
--input_normalize mean_std \
|
| 82 |
+
--exp FSMNet_BraTS_8x --phase test
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
##### fastMRI dataset, AF=4
|
| 86 |
+
```
|
| 87 |
+
python test_fastmri.py --root_path /data/qic99/MRI_recon/fastMRI/ \
|
| 88 |
+
--gpu 5 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 89 |
+
--exp FSMNet_fastmri_4x --phase test
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
##### fastMRI dataset, AF=8
|
| 93 |
+
```
|
| 94 |
+
python test_fastmri.py --root_path /data/qic99/MRI_recon/fastMRI/ \
|
| 95 |
+
--gpu 6 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 96 |
+
--exp FSMNet_fastmri_8x --phase test
|
| 97 |
+
```
|
MRI_recon/code/Frequency-Diffusion/FSMNet/__init__.py
ADDED
|
File without changes
|
MRI_recon/code/Frequency-Diffusion/FSMNet/bash/brats.sh
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# BraTS dataset, AF=4
|
| 2 |
+
mamba activate diffmri
|
| 3 |
+
cd /home/cbtil3/hao/repo/Frequency-Diffusion/FSMNet-modify
|
| 4 |
+
cd /bask/projects/j/jiaoj-rep-learn/Hao/repo/Frequency-Diffusion/FSMNet-modify
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
gamedrive=/media/cbtil3/74ec35fd-2452-4dcc-8d7d-3ba957e302c9
|
| 8 |
+
|
| 9 |
+
#4T folder: /media/cbtil3/9feaf350-913e-4def-8114-f03573c04364/hao
|
| 10 |
+
root_path_4x=/bask/projects/j/jiaoj-rep-learn/Hao/datasets/knee/image_100patients_4X/
|
| 11 |
+
root_path_8x=/bask/projects/j/jiaoj-rep-learn/Hao/datasets/knee/image_100patients_8X/
|
| 12 |
+
|
| 13 |
+
root_path_4x=$gamedrive/Datasets/medical/FrequencyDiffusion/brats/image_100patients_4X/
|
| 14 |
+
root_path_8x=$gamedrive/Datasets/medical/FrequencyDiffusion/brats/image_100patients_8X/
|
| 15 |
+
|
| 16 |
+
python train_brats.py --root_path $root_path_4x\
|
| 17 |
+
--gpu 0 --batch_size 4 --base_lr 0.0001 --MRIDOWN 4X --low_field_SNR 0 \
|
| 18 |
+
--input_normalize mean_std --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 19 |
+
--exp FSMNet_BraTS_4x --use_time_model --use_kspace
|
| 20 |
+
|
| 21 |
+
# BraTS dataset, AF=8
|
| 22 |
+
python train_brats.py --root_path $root_path_8x \
|
| 23 |
+
--gpu 0 --batch_size 4 --base_lr 0.0001 --MRIDOWN 8X --low_field_SNR 0 \
|
| 24 |
+
--input_normalize mean_std --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8\
|
| 25 |
+
--exp FSMNet_BraTS_8x --use_time_model --use_kspace
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# Test
|
| 31 |
+
#BraTS dataset, AF=4
|
| 32 |
+
|
| 33 |
+
python test_brats.py --root_path $root_path_4x \
|
| 34 |
+
--gpu 0 --base_lr 0.0001 --MRIDOWN 4X --low_field_SNR 0 \
|
| 35 |
+
--input_normalize mean_std --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 36 |
+
--exp FSMNet_BraTS_4x --phase test --use_time_model --use_kspace
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
#BraTS dataset, AF=8
|
| 41 |
+
|
| 42 |
+
python test_brats.py --root_path $root_path_8x \
|
| 43 |
+
--gpu 1 --base_lr 0.0001 --MRIDOWN 8X --low_field_SNR 0 \
|
| 44 |
+
--input_normalize mean_std --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 45 |
+
--exp FSMNet_BraTS_8x --phase test --use_time_model --use_kspace
|
| 46 |
+
|
MRI_recon/code/Frequency-Diffusion/FSMNet/bash/fastmri.sh
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
cd /home/v-qichen3/MRI_recon/code/Frequency-Diffusion/FSMNet
|
| 3 |
+
|
| 4 |
+
data_root=/home/v-qichen3/MRI_recon/data/fastmri
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
python train_fastmri.py --root_path $data_root \
|
| 8 |
+
--gpu 2 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 9 |
+
--exp FSMNet_fastmri_4x --MRIDOWN 4X --MASKTYPE random \
|
| 10 |
+
--num_timesteps 5 --image_size 320 --use_kspace --use_time_model
|
| 11 |
+
|
| 12 |
+
# fastMRI dataset, AF=8
|
| 13 |
+
|
| 14 |
+
# python train_fastmri.py --root_path $data_root \
|
| 15 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 16 |
+
# --exp FSMNet_fastmri_8x --MRIDOWN 8X --MASKTYPE equispaced \
|
| 17 |
+
# --num_timesteps 5 --image_size 320 --use_kspace --use_time_model
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# python train_fastmri.py --root_path $data_root \
|
| 21 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.03 --ACCELERATIONS 12 \
|
| 22 |
+
# --exp FSMNet_fastmri_12x --MRIDOWN 12X --MASKTYPE equispaced \
|
| 23 |
+
# --num_timesteps 5 --image_size 320 --use_kspace --use_time_model
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# # Test
|
| 28 |
+
# #fastMRI dataset, AF=4
|
| 29 |
+
# model_4x=model/FSMNet_fastmri_4x/iter_100000.pth
|
| 30 |
+
|
| 31 |
+
python test_fastmri.py --root_path $data_root \
|
| 32 |
+
--gpu 1 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 33 |
+
--exp FSMNet_fastmri_4x --phase test --MRIDOWN 4X \
|
| 34 |
+
--num_timesteps 5 --image_size 320 --use_kspace --use_time_model --test_sample Ksample --snapshot_path /home/v-qichen3/MRI_recon/code/Frequency-Diffusion/FSMNet/model/FSMNet_fastmri_4x_t5_kspace_time_no_distortion
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# ColdDiffusion DDPM
|
| 39 |
+
|
| 40 |
+
# #fastMRI dataset, AF=8
|
| 41 |
+
|
| 42 |
+
python test_fastmri.py --root_path $data_root \
|
| 43 |
+
--gpu 3 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 44 |
+
--exp FSMNet_fastmri_8x --phase test --MRIDOWN 8X \
|
| 45 |
+
--num_timesteps 5 --image_size 320 --use_kspace --use_time_model --test_sample Ksample --snapshot_path /home/v-qichen3/MRI_recon/code/Frequency-Diffusion/FSMNet/model/FSMNet_fastmri_8x_t5_kspace_time_no_distortion
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# ColdDiffusion DDPM
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# python test_fastmri.py --root_path $data_root \
|
| 52 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.03 --ACCELERATIONS 12 \
|
| 53 |
+
# --exp FSMNet_fastmri_12x --phase test --MRIDOWN 12X \
|
| 54 |
+
# --num_timesteps 5 --image_size 320 --use_kspace --use_time_model --test_sample Ksample # ColdDiffusion DDPM
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# # FSMNet_fastmri_12x_t30_kspace_time
|
| 59 |
+
|
MRI_recon/code/Frequency-Diffusion/FSMNet/bash/fastmri_8x.sh
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
cd /home/v-qichen3/MRI_recon/code/Frequency-Diffusion/FSMNet
|
| 3 |
+
|
| 4 |
+
data_root=/home/v-qichen3/MRI_recon/data/fastmri
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# python train_fastmri.py --root_path $data_root \
|
| 8 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 9 |
+
# --exp FSMNet_fastmri_4x --MRIDOWN 4X --MASKTYPE random \
|
| 10 |
+
# --num_timesteps 5 --image_size 320 --use_kspace --use_time_model
|
| 11 |
+
|
| 12 |
+
# fastMRI dataset, AF=8
|
| 13 |
+
|
| 14 |
+
python train_fastmri.py --root_path $data_root \
|
| 15 |
+
--gpu 3 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 16 |
+
--exp FSMNet_fastmri_8x --MRIDOWN 8X --MASKTYPE equispaced \
|
| 17 |
+
--num_timesteps 5 --image_size 320 --use_kspace --use_time_model
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# python train_fastmri.py --root_path $data_root \
|
| 21 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.03 --ACCELERATIONS 12 \
|
| 22 |
+
# --exp FSMNet_fastmri_12x --MRIDOWN 12X --MASKTYPE equispaced \
|
| 23 |
+
# --num_timesteps 5 --image_size 320 --use_kspace --use_time_model
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# # Test
|
| 28 |
+
# #fastMRI dataset, AF=4
|
| 29 |
+
# model_4x=model/FSMNet_fastmri_4x/iter_100000.pth
|
| 30 |
+
|
| 31 |
+
# python test_fastmri.py --root_path $data_root \
|
| 32 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 33 |
+
# --exp FSMNet_fastmri_4x --phase test --MRIDOWN 4X \
|
| 34 |
+
# --num_timesteps 5 --image_size 320 --use_kspace --use_time_model --test_sample Ksample # ColdDiffusion DDPM
|
| 35 |
+
|
| 36 |
+
# #fastMRI dataset, AF=8
|
| 37 |
+
|
| 38 |
+
# python test_fastmri.py --root_path $data_root \
|
| 39 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 40 |
+
# --exp FSMNet_fastmri_8x --phase test --MRIDOWN 8X \
|
| 41 |
+
# --num_timesteps 5 --image_size 320 --use_kspace --use_time_model --test_sample Ksample # ColdDiffusion DDPM
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# python test_fastmri.py --root_path $data_root \
|
| 45 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.03 --ACCELERATIONS 12 \
|
| 46 |
+
# --exp FSMNet_fastmri_12x --phase test --MRIDOWN 12X \
|
| 47 |
+
# --num_timesteps 5 --image_size 320 --use_kspace --use_time_model --test_sample Ksample # ColdDiffusion DDPM
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# # FSMNet_fastmri_12x_t30_kspace_time
|
| 52 |
+
|
MRI_recon/code/Frequency-Diffusion/FSMNet/bash/m4raw.sh
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
cd /home/v-qichen3/MRI_recon/code/Frequency-Diffusion/FSMNet/
|
| 3 |
+
|
| 4 |
+
data_root=/home/v-qichen3/MRI_recon/data/m4raw
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
python train_m4raw.py --root_path $data_root \
|
| 8 |
+
--gpu 3 --batch_size 4 --base_lr 0.0005 --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 9 |
+
--exp FSMNet_m4raw_4x_lr5e-4 --MRIDOWN 4X --MASKTYPE random \
|
| 10 |
+
--num_timesteps 30 --image_size 240 --use_kspace --use_time_model
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# m4raw dataset, AF=8
|
| 14 |
+
# python train_m4raw.py --root_path $data_root \
|
| 15 |
+
# --gpu 0 --batch_size 8 --base_lr 0.0001 --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 16 |
+
# --exp FSMNet_m4raw_8x --MRIDOWN 8X --MASKTYPE equispaced \
|
| 17 |
+
# --num_timesteps 5 --image_size 240 --use_kspace --use_time_model
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# data_root=/bask/projects/j/jiaoj-rep-learn/Hao/datasets/knee
|
| 21 |
+
# python train_m4raw.py --root_path $data_root \
|
| 22 |
+
# --gpu 1 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.03 --ACCELERATIONS 12 \
|
| 23 |
+
# --exp FSMNet_m4raw_12x --MRIDOWN 12X --MASKTYPE equispaced \
|
| 24 |
+
# --num_timesteps 30 --image_size 240 --use_kspace --use_time_model
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# ---------------- Test ----------------
|
| 28 |
+
|
| 29 |
+
python test_m4raw.py --root_path $data_root \
|
| 30 |
+
--gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.08 --ACCELERATIONS 4 \
|
| 31 |
+
--exp FSMNet_m4raw_4x --phase test --MASKTYPE random --MRIDOWN 4X \
|
| 32 |
+
--num_timesteps 30 --image_size 240 --use_kspace --use_time_model --test_tag no_distortion \
|
| 33 |
+
--test_sample ColdDiffusion --snapshot_path /home/v-qichen3/MRI_recon/code/Frequency-Diffusion/FSMNet/model/FSMNet_m4raw_4x_lr5e-4_t30_new_kspace_time
|
| 34 |
+
|
| 35 |
+
#m4raw dataset, AF=8
|
| 36 |
+
# python test_m4raw.py --root_path $data_root \
|
| 37 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.04 --ACCELERATIONS 8 \
|
| 38 |
+
# --exp FSMNet_m4raw_8x --phase test --MASKTYPE equispaced --MRIDOWN 8X \
|
| 39 |
+
# --num_timesteps 5 --image_size 240 --use_kspace --use_time_model --test_sample Ksample # ColdDiffusion DDPM
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# python test_m4raw.py --root_path $data_root \
|
| 44 |
+
# --gpu 0 --batch_size 4 --base_lr 0.0001 --CENTER_FRACTIONS 0.03 --ACCELERATIONS 12 \
|
| 45 |
+
# --exp FSMNet_m4raw_12x --phase test --MASKTYPE equispaced --MRIDOWN 12X \
|
| 46 |
+
# --num_timesteps 5 --image_size 240 --use_kspace --use_time_model --test_sample Ksample # ColdDiffusion DDPM
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# ------------------------------------
|
| 51 |
+
# NMSE: 3.2228 ± 0.2863
|
| 52 |
+
# PSNR: 28.8534 ± 0.4793
|
| 53 |
+
# SSIM: 0.7769 ± 0.0126
|
| 54 |
+
# ------------------------------------
|
| 55 |
+
# All NMSE: 3.2175 ± 0.5230
|
| 56 |
+
# All PSNR: 27.8081 ± 0.7997
|
| 57 |
+
# All SSIM: 0.7507 ± 0.0218
|
| 58 |
+
# ------------------------------------
|
| 59 |
+
# Save Path: model/FSMNet_m4raw_4x_t5_new_kspace_no_distortion//result_case/
|
| 60 |
+
|
| 61 |
+
# ------------------------------------
|
| 62 |
+
# NMSE: 2.9359 ± 0.2239
|
| 63 |
+
# PSNR: 29.2540 ± 0.4309
|
| 64 |
+
# SSIM: 0.7922 ± 0.0116
|
| 65 |
+
# ------------------------------------
|
| 66 |
+
# DDPM NMSE: 9.8704 ± 0.5135
|
| 67 |
+
# DDPM PSNR: 23.9812 ± 0.2724
|
| 68 |
+
# DDPM SSIM: 0.6568 ± 0.0124
|
| 69 |
+
# ------------------------------------
|
| 70 |
+
# Save Path: model/FSMNet_m4raw_4x_t5_new_kspace_no_distortion_time/result_case/
|
| 71 |
+
|
| 72 |
+
# ------------------------------------
|
| 73 |
+
# NMSE: 2.2551 ± 0.1361
|
| 74 |
+
# PSNR: 30.3949 ± 0.4174
|
| 75 |
+
# SSIM: 0.8167 ± 0.0126
|
| 76 |
+
# ------------------------------------
|
| 77 |
+
# Save Path: FSMNet_m4raw_4x_t5_new_kspace_time
|
| 78 |
+
|
| 79 |
+
# ------------------------------------
|
| 80 |
+
# NMSE: 3.8027 ± 0.3095
|
| 81 |
+
# PSNR: 27.7977 ± 0.4213
|
| 82 |
+
# SSIM: 0.7529 ± 0.0119
|
| 83 |
+
# ------------------------------------
|
| 84 |
+
# Save Path: FSMNet_m4raw_4x
|
| 85 |
+
|
| 86 |
+
# ------------------------------------
|
| 87 |
+
# NMSE: 6.6898 ± 0.6444
|
| 88 |
+
# PSNR: 25.6848 ± 0.4866
|
| 89 |
+
# SSIM: 0.6844 ± 0.0149
|
| 90 |
+
# ------------------------------------
|
| 91 |
+
# ColdDiffusion NMSE: 8.4526 ± 0.7364
|
| 92 |
+
# ColdDiffusion PSNR: 24.6651 ± 0.4320
|
| 93 |
+
# ColdDiffusion SSIM: 0.6489 ± 0.0141
|
| 94 |
+
# ------------------------------------
|
| 95 |
+
# Save Path: FSMNet_m4raw_8x_t5_new_kspace_time
|
| 96 |
+
|
| 97 |
+
# ------------------------------------
|
| 98 |
+
# NMSE: 7.8239 ± 0.7702
|
| 99 |
+
# PSNR: 24.1138 ± 0.5690
|
| 100 |
+
# SSIM: 0.6421 ± 0.0164
|
| 101 |
+
# ------------------------------------
|
| 102 |
+
# Save Path: FSMNet_m4raw_8x
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# ------------------------------------
|
| 106 |
+
# NMSE: 7.9649 ± 0.7895
|
| 107 |
+
# PSNR: 24.9283 ± 0.4875
|
| 108 |
+
# SSIM: 0.6548 ± 0.0150
|
| 109 |
+
# ------------------------------------
|
| 110 |
+
# All NMSE: 7.9485 ± 1.4481
|
| 111 |
+
# All PSNR: 23.8971 ± 0.8682
|
| 112 |
+
# All SSIM: 0.6231 ± 0.0348
|
| 113 |
+
# ------------------------------------
|
| 114 |
+
# Save Path: model/FSMNet_m4raw_8x_t5_new_kspace_no_distortion//result_case/
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
# ------------------------------------
|
| 118 |
+
# NMSE: 7.4375 ± 0.7510
|
| 119 |
+
# PSNR: 25.2266 ± 0.4907
|
| 120 |
+
# SSIM: 0.6662 ± 0.0140
|
| 121 |
+
# ------------------------------------
|
| 122 |
+
# All NMSE: 7.4210 ± 1.3740
|
| 123 |
+
# All PSNR: 24.1975 ± 0.8648
|
| 124 |
+
# All SSIM: 0.6353 ± 0.0317
|
| 125 |
+
# ------------------------------------
|
| 126 |
+
# Save Path: model/FSMNet_m4raw_8x_t5_new_kspace_time_no_distortion//result_case/
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
# ------------------------------------
|
| 131 |
+
# NMSE: 9.9329 ± 0.8853
|
| 132 |
+
# PSNR: 23.9651 ± 0.4525
|
| 133 |
+
# SSIM: 0.6380 ± 0.0161
|
| 134 |
+
# ------------------------------------
|
| 135 |
+
# Save Path: model/FSMNet_m4raw_12x_t5_new_kspace_time//result_case/
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# ------------------------------------
|
| 139 |
+
# NMSE: 9.4328 ± 0.9025
|
| 140 |
+
# PSNR: 23.1375 ± 0.5578
|
| 141 |
+
# SSIM: 0.6096 ± 0.0188
|
| 142 |
+
# ------------------------------------
|
| 143 |
+
# All NMSE: 9.4449 ± 1.9331
|
| 144 |
+
# All PSNR: 22.2995 ± 1.0077
|
| 145 |
+
# All SSIM: 0.5807 ± 0.0359
|
| 146 |
+
# ------------------------------------
|
| 147 |
+
# Save Path: model/FSMNet_m4raw_12x//result_case/
|
| 148 |
+
|
| 149 |
+
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/BRATS_DuDo_dataloader.py
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
dual-domain network的dataloader, 读取两个模态的under-sampled和fully-sampled kspace data, 以及high-quality image作为监督信号。
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
from __future__ import print_function, division
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from glob import glob
|
| 10 |
+
import random
|
| 11 |
+
from skimage import transform
|
| 12 |
+
from PIL import Image
|
| 13 |
+
|
| 14 |
+
import cv2
|
| 15 |
+
import os
|
| 16 |
+
import torch
|
| 17 |
+
from torch.utils.data import Dataset
|
| 18 |
+
|
| 19 |
+
from .kspace_subsample import undersample_mri, mri_fft
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def normalize(data, mean, stddev, eps=0.0):
|
| 23 |
+
"""
|
| 24 |
+
Normalize the given tensor.
|
| 25 |
+
|
| 26 |
+
Applies the formula (data - mean) / (stddev + eps).
|
| 27 |
+
|
| 28 |
+
Args:
|
| 29 |
+
data (torch.Tensor): Input data to be normalized.
|
| 30 |
+
mean (float): Mean value.
|
| 31 |
+
stddev (float): Standard deviation.
|
| 32 |
+
eps (float, default=0.0): Added to stddev to prevent dividing by zero.
|
| 33 |
+
|
| 34 |
+
Returns:
|
| 35 |
+
torch.Tensor: Normalized tensor
|
| 36 |
+
"""
|
| 37 |
+
return (data - mean) / (stddev + eps)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def normalize_instance(data, eps=0.0):
|
| 41 |
+
"""
|
| 42 |
+
Normalize the given tensor with instance norm/
|
| 43 |
+
|
| 44 |
+
Applies the formula (data - mean) / (stddev + eps), where mean and stddev
|
| 45 |
+
are computed from the data itself.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
data (torch.Tensor): Input data to be normalized
|
| 49 |
+
eps (float): Added to stddev to prevent dividing by zero
|
| 50 |
+
|
| 51 |
+
Returns:
|
| 52 |
+
torch.Tensor: Normalized tensor
|
| 53 |
+
"""
|
| 54 |
+
mean = data.mean()
|
| 55 |
+
std = data.std()
|
| 56 |
+
|
| 57 |
+
return normalize(data, mean, std, eps), mean, std
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def roll(x, shift, dim):
|
| 62 |
+
"""
|
| 63 |
+
Similar to np.roll but applies to PyTorch Tensors.
|
| 64 |
+
|
| 65 |
+
Args:
|
| 66 |
+
x (torch.Tensor): A PyTorch tensor.
|
| 67 |
+
shift (int): Amount to roll.
|
| 68 |
+
dim (int): Which dimension to roll.
|
| 69 |
+
|
| 70 |
+
Returns:
|
| 71 |
+
torch.Tensor: Rolled version of x.
|
| 72 |
+
"""
|
| 73 |
+
if isinstance(shift, (tuple, list)):
|
| 74 |
+
assert len(shift) == len(dim)
|
| 75 |
+
for s, d in zip(shift, dim):
|
| 76 |
+
x = roll(x, s, d)
|
| 77 |
+
return x
|
| 78 |
+
shift = shift % x.size(dim)
|
| 79 |
+
if shift == 0:
|
| 80 |
+
return x
|
| 81 |
+
left = x.narrow(dim, 0, x.size(dim) - shift)
|
| 82 |
+
right = x.narrow(dim, x.size(dim) - shift, shift)
|
| 83 |
+
return torch.cat((right, left), dim=dim)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def fftshift(x, dim=None):
|
| 88 |
+
"""
|
| 89 |
+
Similar to np.fft.fftshift but applies to PyTorch Tensors
|
| 90 |
+
|
| 91 |
+
Args:
|
| 92 |
+
x (torch.Tensor): A PyTorch tensor.
|
| 93 |
+
dim (int): Which dimension to fftshift.
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
torch.Tensor: fftshifted version of x.
|
| 97 |
+
"""
|
| 98 |
+
if dim is None:
|
| 99 |
+
dim = tuple(range(x.dim()))
|
| 100 |
+
shift = [dim // 2 for dim in x.shape]
|
| 101 |
+
elif isinstance(dim, int):
|
| 102 |
+
shift = x.shape[dim] // 2
|
| 103 |
+
else:
|
| 104 |
+
shift = [x.shape[i] // 2 for i in dim]
|
| 105 |
+
|
| 106 |
+
return roll(x, shift, dim)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def ifftshift(x, dim=None):
|
| 110 |
+
"""
|
| 111 |
+
Similar to np.fft.ifftshift but applies to PyTorch Tensors
|
| 112 |
+
|
| 113 |
+
Args:
|
| 114 |
+
x (torch.Tensor): A PyTorch tensor.
|
| 115 |
+
dim (int): Which dimension to ifftshift.
|
| 116 |
+
|
| 117 |
+
Returns:
|
| 118 |
+
torch.Tensor: ifftshifted version of x.
|
| 119 |
+
"""
|
| 120 |
+
if dim is None:
|
| 121 |
+
dim = tuple(range(x.dim()))
|
| 122 |
+
shift = [(dim + 1) // 2 for dim in x.shape]
|
| 123 |
+
elif isinstance(dim, int):
|
| 124 |
+
shift = (x.shape[dim] + 1) // 2
|
| 125 |
+
else:
|
| 126 |
+
shift = [(x.shape[i] + 1) // 2 for i in dim]
|
| 127 |
+
|
| 128 |
+
return roll(x, shift, dim)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def ifft2c(data: torch.Tensor, norm: str = "ortho") -> torch.Tensor:
|
| 133 |
+
"""
|
| 134 |
+
Apply centered 2-dimensional Inverse Fast Fourier Transform.
|
| 135 |
+
Returns:
|
| 136 |
+
The IFFT of the input.
|
| 137 |
+
"""
|
| 138 |
+
if not data.shape[-1] == 2:
|
| 139 |
+
raise ValueError("Tensor does not have separate complex dim.")
|
| 140 |
+
|
| 141 |
+
data = ifftshift(data, dim=[-3, -2])
|
| 142 |
+
data = torch.view_as_real(
|
| 143 |
+
torch.fft.ifftn( # type: ignore
|
| 144 |
+
torch.view_as_complex(data), dim=(-2, -1), norm=norm
|
| 145 |
+
)
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
return data
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class Hybrid(Dataset):
    """Paired T1/T2 BRATS slice dataset with on-the-fly k-space simulation.

    T1 slices are listed from a fixed CSV split; the matching T2 path is
    derived by a 't1' -> 't2' filename substitution.  Each item is converted
    to k-space by `mri_fft` (T1, noise only) and `undersample_mri`
    (T2, undersampling mask + noise) — both defined earlier in this file.

    Args:
        base_dir: directory holding the .png slices (prefix for every path).
        HF_refine: "True"/"False" string; when "True", also load the k-space
            reconstructed in a previous round (``*_recon_kspace_*_DudoLoss.npy``).
        split: 'train' or 'test' (selects the CSV file).
        MRIDOWN: undersampling factor tag used in filenames (e.g. '4X').
        SNR: simulated noise level in dB (passed to the k-space helpers).
        transform: optional callable applied to the sample dict.
        input_round: round tag used in the refined-k-space filenames.
        input_normalize: kept for interface symmetry; only stored.
    """

    def __init__(self, base_dir=None, HF_refine = 'False', split='train', MRIDOWN='4X', SNR=15, \
                 transform=None, input_round = None, input_normalize=None):

        super().__init__()
        self._base_dir = base_dir
        self.HF_refine = HF_refine
        self.input_round = input_round
        self._MRIDOWN = MRIDOWN
        self._SNR = SNR
        self.im_ids = []
        self.t2_images = []
        # NOTE(review): hard-coded machine-specific split location.
        self.splits_path = "/data/xiaohan/BRATS_dataset/cv_splits_100patients/"

        if split=='train':
            self.train_file = self.splits_path + 'train_data.csv'
            # Last CSV column holds the relative image path; keep only T1 slices.
            train_images = pd.read_csv(self.train_file).iloc[:, -1].values.tolist()
            self.t1_images = [image for image in train_images if image.split('_')[-1]=='t1.png']

        elif split=='test':
            self.test_file = self.splits_path + 'test_data.csv'
            # self.test_file = self.splits_path + 'train_data.csv'
            test_images = pd.read_csv(self.test_file).iloc[:, -1].values.tolist()
            # test_images = os.listdir(self._base_dir)
            self.t1_images = [image for image in test_images if image.split('_')[-1]=='t1.png']

        # Derive the paired T2 path for every T1 slice.
        for image_path in self.t1_images:
            t2_path = image_path.replace('t1', 't2')
            self.t2_images.append(t2_path)

        self.transform = transform
        self.input_normalize = input_normalize

        assert (len(self.t1_images) == len(self.t2_images))

        # Display stats
        print('Number of images in {}: {:d}'.format(split, len(self.t1_images)))

    def __len__(self):
        return len(self.t1_images)


    def __getitem__(self, index):
        """Return ``(sample, sample_stats)`` for slice *index*.

        ``sample`` holds image-domain and k-space versions of T1/T2 plus the
        T2 undersampling mask (and previous-round reconstructions when
        ``HF_refine == "True"``); ``sample_stats`` holds the per-image maxima
        used for normalization plus the slice name, for later de-normalization.
        """

        # Common filename stem shared by the T1/T2 pair.
        image_name = self.t1_images[index].split('t1')[0]

        # Load 8-bit pngs and scale to [0, 1].
        t1 = np.array(Image.open(self._base_dir + self.t1_images[index]))/255.0
        t2 = np.array(Image.open(self._base_dir + self.t2_images[index]))/255.0

        ### normalize the MRI image by divide_max
        t1_max, t2_max = t1.max(), t2.max()
        t1 = t1/t1_max
        t2 = t2/t2_max
        sample_stats = {"t1_max": t1_max, "t2_max": t2_max, "image_name": image_name}

        ### convert images to kspace and perform undersampling.
        t1_kspace_in, t1_in, t1_kspace, t1_img = mri_fft(t1, _SNR = self._SNR)
        t2_kspace_in, t2_in, t2_kspace, t2_img, mask = undersample_mri(
            t2, _MRIDOWN = self._MRIDOWN, _SNR = self._SNR)

        if self.HF_refine == "False":
            sample = {'t1': t1_img, 't1_in': t1_in, 't1_kspace': t1_kspace, 't1_kspace_in': t1_kspace_in, \
                      't2': t2_img, 't2_in': t2_in, 't2_kspace': t2_kspace, 't2_kspace_in': t2_kspace_in, \
                      't2_mask': mask}

        elif self.HF_refine == "True":
            # Load the k-space data reconstructed in the previous round.
            t1_krecon_path = self._base_dir + self.t1_images[index].replace(
                't1.png', 't1_' + str(self._SNR) + 'dB_recon_kspace_' + self.input_round + '_DudoLoss.npy')
            t2_krecon_path = self._base_dir + self.t1_images[index].replace('t1.png', 't2_' + self._MRIDOWN + \
                '_' + str(self._SNR) + 'dB_recon_kspace_' + self.input_round + '_DudoLoss.npy')

            t1_krecon = np.load(t1_krecon_path)
            t2_krecon = np.load(t2_krecon_path)

            sample = {'t1': t1_img, 't1_in': t1_in, 't1_kspace': t1_kspace, 't1_kspace_in': t1_kspace_in, \
                      't2': t2_img, 't2_in': t2_in, 't2_kspace': t2_kspace, 't2_kspace_in': t2_kspace_in, \
                      't2_mask': mask, 't1_krecon': t1_krecon, 't2_krecon': t2_krecon}


        if self.transform is not None:
            sample = self.transform(sample)

        return sample, sample_stats
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
class ToTensor(object):
    """Convert the two numpy images of a sample into float32 torch tensors.

    Each H x W array gains a leading channel axis (1 x H x W) and is
    returned under the keys ``'ct'`` (from ``'image'``) and ``'mri'``
    (from ``'target'``).
    """

    def __call__(self, sample):
        def _to_chw_tensor(arr):
            # numpy H x W  ->  torch C x H x W with a single channel.
            chw = arr[:, :, None].transpose((2, 0, 1))
            return torch.from_numpy(chw).float()

        return {'ct': _to_chw_tensor(sample['image']),
                'mri': _to_chw_tensor(sample['target'])}
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
# class ToTensor(object):
|
| 275 |
+
# """Convert ndarrays in sample to Tensors."""
|
| 276 |
+
|
| 277 |
+
# def __call__(self, sample):
|
| 278 |
+
# # swap color axis because
|
| 279 |
+
# # numpy image: H x W x C
|
| 280 |
+
# # torch image: C X H X W
|
| 281 |
+
# img_in = sample['image_in'][:, :, None].transpose((2, 0, 1))
|
| 282 |
+
# img = sample['image'][:, :, None].transpose((2, 0, 1))
|
| 283 |
+
# target_in = sample['target_in'][:, :, None].transpose((2, 0, 1))
|
| 284 |
+
# target = sample['target'][:, :, None].transpose((2, 0, 1))
|
| 285 |
+
# # print("img_in before_numpy range:", img_in.max(), img_in.min())
|
| 286 |
+
# img_in = torch.from_numpy(img_in).float()
|
| 287 |
+
# img = torch.from_numpy(img).float()
|
| 288 |
+
# target_in = torch.from_numpy(target_in).float()
|
| 289 |
+
# target = torch.from_numpy(target).float()
|
| 290 |
+
# # print("img_in range:", img_in.max(), img_in.min())
|
| 291 |
+
|
| 292 |
+
# return {'ct_in': img_in,
|
| 293 |
+
# 'ct': img,
|
| 294 |
+
# 'mri_in': target_in,
|
| 295 |
+
# 'mri': target}
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/BRATS_dataloader.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import print_function, division
|
| 2 |
+
import numpy as np
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from glob import glob
|
| 5 |
+
import random
|
| 6 |
+
from skimage import transform
|
| 7 |
+
from PIL import Image
|
| 8 |
+
|
| 9 |
+
import os
|
| 10 |
+
import torch
|
| 11 |
+
from torch.utils.data import Dataset
|
| 12 |
+
|
| 13 |
+
class Hybrid(Dataset):
    """Paired T1/T2 BRATS png dataset with precomputed degraded inputs.

    The T1 file list comes from a fixed CSV split; the corresponding T2,
    undersampled and/or noisy variants are derived purely by filename
    substitution, so all degraded images must already exist on disk.

    Args:
        base_dir: directory prefix prepended to every relative image path.
        split: 'train' or 'test' (selects the CSV split file).
        MRIDOWN: undersampling tag in filenames (e.g. '4X'), or "False"
            for noise-only T2 inputs.
        SNR: noise level tag in dB; 0 selects the noise-free variants.
        transform: optional callable applied to each sample dict.
    """

    def __init__(self, base_dir=None, split='train', MRIDOWN='4X', SNR=15, transform=None):

        super().__init__()
        self._base_dir = base_dir
        self._MRIDOWN = MRIDOWN
        self.im_ids = []
        self.t2_images = []
        self.t1_undermri_images, self.t2_undermri_images = [], []
        # NOTE(review): hard-coded machine-specific split location.
        self.splits_path = "/home/xiaohan/datasets/BRATS_dataset/BRATS_2020_images/cv_splits/"

        if split=='train':
            self.train_file = self.splits_path + 'train_data.csv'
            # Last CSV column holds the relative image path; keep only T1 slices.
            train_images = pd.read_csv(self.train_file).iloc[:, -1].values.tolist()
            self.t1_images = [image for image in train_images if image.split('_')[-1]=='t1.png']


        elif split=='test':
            self.test_file = self.splits_path + 'test_data.csv'
            test_images = pd.read_csv(self.test_file).iloc[:, -1].values.tolist()
            # test_images = os.listdir(self._base_dir)
            self.t1_images = [image for image in test_images if image.split('_')[-1]=='t1.png']


        # Derive the paired T2 / degraded-input paths for every T1 slice.
        for image_path in self.t1_images:
            t2_path = image_path.replace('t1', 't2')
            if SNR == 0:
                # Noise-free setting: T1 input is the clean image itself.
                # t1_under_path = image_path.replace('t1', 't1_' + self._MRIDOWN + '_undermri')
                t1_under_path = image_path
                t2_under_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_undermri')
            else:
                # Noisy setting: both inputs carry the SNR tag; T2 may also
                # be undersampled unless MRIDOWN == "False".
                # t1_under_path = image_path.replace('t1', 't1_' + self._MRIDOWN + '_' + str(SNR) + 'dB_undermri')
                t1_under_path = image_path.replace('t1', 't1_' + str(SNR) + 'dB')
                if MRIDOWN == "False":
                    t2_under_path = image_path.replace('t1', 't2_' + str(SNR) + 'dB')
                else:
                    t2_under_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_' + str(SNR) + 'dB_undermri')

            # print("image paths:", image_path, t1_under_path, t2_path, t2_under_path)

            self.t2_images.append(t2_path)
            self.t1_undermri_images.append(t1_under_path)
            self.t2_undermri_images.append(t2_under_path)

        self.transform = transform

        assert (len(self.t1_images) == len(self.t2_images))
        assert (len(self.t1_images) == len(self.t1_undermri_images))
        assert (len(self.t1_images) == len(self.t2_undermri_images))

        # Display stats
        print('Number of images in {}: {:d}'.format(split, len(self.t1_images)))

    def __len__(self):
        return len(self.t1_images)


    def __getitem__(self, index):
        # Two supported settings (translated from the original Chinese notes):
        # 1. T1 fully-sampled without noise, T2 down-sampled: pure MRI acceleration.
        # 2. T1 fully-sampled but noisy, T2 down-sampled and noisy: joint
        #    acceleration and enhancement.
        # In both settings the T1 and T2 network inputs are low-quality images.
        sample = {'image_in': np.array(Image.open(self._base_dir + self.t1_undermri_images[index]))/255.0,
                  'image': np.array(Image.open(self._base_dir + self.t1_images[index]))/255.0,
                  'target_in': np.array(Image.open(self._base_dir + self.t2_undermri_images[index]))/255.0,
                  'target': np.array(Image.open(self._base_dir + self.t2_images[index]))/255.0}


        # 2023/05/23 (translated): alternative that feeds the clean T1 image
        # as input (identical to its ground truth) to give T2 better guidance.
        # sample = {'image_in': np.array(Image.open(self._base_dir + self.t1_images[index]))/255.0,
        #           'image': np.array(Image.open(self._base_dir + self.t1_images[index]))/255.0,
        #           'target_in': np.array(Image.open(self._base_dir + self.t2_undermri_images[index]))/255.0,
        #           'target': np.array(Image.open(self._base_dir + self.t2_images[index]))/255.0}

        if self.transform is not None:
            sample = self.transform(sample)

        return sample
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class RandomPadCrop(object):
    """Reflect-pad each 240x240 image to 256x256, then take one shared
    random 240x240 crop so the four images stay spatially aligned."""

    def __call__(self, sample):
        new_w, new_h = 256, 256
        crop_size = 240
        pad_size = (256 - 240) // 2

        keys = ('image_in', 'image', 'target_in', 'target')
        padded = {k: np.pad(sample[k], pad_size, mode='reflect') for k in keys}

        # Single offset pair, shared by all four images.
        ww = random.randint(0, np.maximum(0, new_w - crop_size))
        hh = random.randint(0, np.maximum(0, new_h - crop_size))

        return {k: padded[k][ww:ww + crop_size, hh:hh + crop_size] for k in keys}
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class RandomResizeCrop(object):
    """Resize every image of the sample to 270x270 (bicubic, order=3) and
    take one shared random 256x256 crop, keeping the images aligned."""

    def __call__(self, sample):
        new_w, new_h = 270, 270
        crop_size = 256

        keys = ('image_in', 'image', 'target_in', 'target')
        resized = {k: transform.resize(sample[k], (new_h, new_w), order=3) for k in keys}

        # Single offset pair, shared by all four images.
        ww = random.randint(0, np.maximum(0, new_w - crop_size))
        hh = random.randint(0, np.maximum(0, new_h - crop_size))

        return {k: resized[k][ww:ww + crop_size, hh:hh + crop_size] for k in keys}
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class ToTensor(object):
    """Convert the four H x W numpy arrays of a sample into 1 x H x W
    float32 torch tensors, renaming them to ct/mri keys."""

    def __call__(self, sample):
        def _chw(key):
            # numpy H x W  ->  torch C x H x W with a single channel.
            arr = sample[key][:, :, None].transpose((2, 0, 1))
            return torch.from_numpy(arr).float()

        return {'ct_in': _chw('image_in'),
                'ct': _chw('image'),
                'mri_in': _chw('target_in'),
                'mri': _chw('target')}
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/BRATS_dataloader_new.py
ADDED
|
@@ -0,0 +1,384 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import print_function, division
|
| 2 |
+
import numpy as np
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from glob import glob
|
| 5 |
+
import random
|
| 6 |
+
from skimage import transform
|
| 7 |
+
from PIL import Image
|
| 8 |
+
|
| 9 |
+
import cv2
|
| 10 |
+
import os
|
| 11 |
+
import torch
|
| 12 |
+
from torch.utils.data import Dataset
|
| 13 |
+
from torchvision import transforms
|
| 14 |
+
from .albu_transform import get_albu_transforms
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def normalize(data, mean, stddev, eps=0.0):
    """Standardize *data* with given statistics.

    Applies ``(data - mean) / (stddev + eps)`` elementwise.

    Args:
        data (torch.Tensor): input data to be normalized.
        mean (float): mean value to subtract.
        stddev (float): standard deviation to divide by.
        eps (float, default=0.0): added to stddev to avoid division by zero.

    Returns:
        torch.Tensor: the normalized data.
    """
    centered = data - mean
    return centered / (stddev + eps)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def normalize_instance(data, eps=0.0):
    """Instance-normalize *data* using its own statistics.

    Applies ``(data - mean) / (std + eps)`` where mean and std are computed
    from *data* itself.

    Args:
        data (torch.Tensor): input data to be normalized.
        eps (float, default=0.0): added to std to avoid division by zero.

    Returns:
        tuple: ``(normalized, mean, std)`` so callers can de-normalize later.
    """
    mean = data.mean()
    std = data.std()
    normalized = (data - mean) / (std + eps)
    return normalized, mean, std
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class Hybrid(Dataset):
    """Paired T1/T2 BRATS png dataset with selectable input normalization.

    File lists come from a CSV split located next to ``base_dir``; degraded
    and previous-round-reconstruction ("krecon") variants are derived by
    filename substitution, so they must already exist on disk.  Albumentations
    transforms (``get_albu_transforms``) are always applied to the six images.

    Args:
        base_dir: image directory; the split CSVs live in its sibling
            ``cv_splits_100patients/`` folder.
        split: 'train' or 'test' (selects CSV and albumentations pipeline).
        MRIDOWN: undersampling tag in filenames (e.g. '4X').
        SNR: noise level tag in dB; 0 selects the noise-free variants.
        transform: optional callable applied to the sample dict after the
            albumentations step.
        input_normalize: "mean_std", "min_max" or "divide" — controls the
            per-item normalization in ``__getitem__``.
        use_kspace: stored flag (the albumentations branch is currently
            forced on regardless; see ``__getitem__``).
    """

    def __init__(self, base_dir=None, split='train', MRIDOWN='4X', \
                 SNR=15, transform=None, input_normalize=None, use_kspace=False):

        super().__init__()
        self._base_dir = base_dir
        self._MRIDOWN = MRIDOWN
        self.im_ids = []
        self.t2_images = []
        self.t1_undermri_images, self.t2_undermri_images = [], []
        self.t1_krecon_images, self.t2_krecon_images = [], []
        self.kspace_refine = "False"  # ADD: hard-coded; the "True" path below is currently dead.
        self.use_kspace = use_kspace

        self.albu_transforms = get_albu_transforms(split, (240, 240))

        # The split CSVs live in a sibling folder of base_dir.
        name = base_dir.rstrip("/ ").split('/')[-1]
        print("base_dir=", base_dir, ", folder name =", name)
        self.splits_path = base_dir.replace(name, 'cv_splits_100patients/')

        if split=='train':
            self.train_file = self.splits_path + 'train_data.csv'
            # Last CSV column holds the relative image path; keep only T1 slices.
            train_images = pd.read_csv(self.train_file).iloc[:, -1].values.tolist()
            self.t1_images = [image for image in train_images if image.split('_')[-1]=='t1.png']

        elif split=='test':
            self.test_file = self.splits_path + 'test_data.csv'
            test_images = pd.read_csv(self.test_file).iloc[:, -1].values.tolist()
            self.t1_images = [image for image in test_images if image.split('_')[-1]=='t1.png']


        # Derive the paired T2 / degraded / krecon paths for every T1 slice.
        for image_path in self.t1_images:
            t2_path = image_path.replace('t1', 't2')

            if SNR == 0:
                t1_under_path = image_path

                if self.kspace_refine == "False":
                    t2_under_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_undermri')
                elif self.kspace_refine == "True":
                    # NOTE(review): dead branch (kspace_refine is hard-coded
                    # "False"); if re-enabled, the krecon paths below are not
                    # set for SNR == 0 and would need defining here.
                    t2_under_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_krecon')

                if self.kspace_refine == "False":
                    t1_krecon_path = image_path
                    t2_krecon_path = image_path

                    # if SNR == 0:
                    #     t1_under_path = image_path.replace('t1', 't1_' + self._MRIDOWN + '_undermri')
                    t1_under_path = image_path
                    t2_under_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_undermri')

            else:
                t1_under_path = image_path.replace('t1', 't1_' + str(SNR) + 'dB')
                t2_under_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_' + str(SNR) + 'dB')

                t1_krecon_path = image_path.replace('t1', 't1_' + str(SNR) + 'dB')
                t2_krecon_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_' + str(SNR) + 'dB')


            self.t2_images.append(t2_path)
            self.t1_undermri_images.append(t1_under_path)
            self.t2_undermri_images.append(t2_under_path)

            self.t1_krecon_images.append(t1_krecon_path)
            self.t2_krecon_images.append(t2_krecon_path)

        self.transform = transform
        self.input_normalize = input_normalize

        assert (len(self.t1_images) == len(self.t2_images))
        assert (len(self.t1_images) == len(self.t1_undermri_images))
        assert (len(self.t1_images) == len(self.t2_undermri_images))

        # Display stats
        print('Number of images in {}: {:d}'.format(split, len(self.t1_images)))

    def __len__(self):
        return len(self.t1_images)


    def __getitem__(self, index):
        """Return ``(sample, sample_stats)`` for slice *index*.

        ``sample_stats`` carries the de-normalization statistics for the
        "mean_std" mode and is 0 otherwise.
        """

        t1_in = np.array(Image.open(self._base_dir + self.t1_undermri_images[index]))/255.0
        t1 = np.array(Image.open(self._base_dir + self.t1_images[index]))/255.0
        t1_krecon = np.array(Image.open(self._base_dir + self.t1_krecon_images[index]))/255.0

        t2_in = np.array(Image.open(self._base_dir + self.t2_undermri_images[index]))/255.0
        t2 = np.array(Image.open(self._base_dir + self.t2_images[index]))/255.0
        t2_krecon = np.array(Image.open(self._base_dir + self.t2_krecon_images[index]))/255.0

        # FIX: sample_stats was previously unbound (NameError at return) when
        # input_normalize was not one of "mean_std"/"min_max"/"divide";
        # default it to the no-stats convention used by the other modes.
        sample_stats = 0

        if self.input_normalize == "mean_std":
            # Normalize each modality by the statistics of its degraded input
            # so input and target share one scale; krecon reuses those stats.
            t1_in, t1_mean, t1_std = normalize_instance(t1_in, eps=1e-11)
            t1 = normalize(t1, t1_mean, t1_std, eps=1e-11)
            t2_in, t2_mean, t2_std = normalize_instance(t2_in, eps=1e-11)
            t2 = normalize(t2, t2_mean, t2_std, eps=1e-11)

            t1_krecon = normalize(t1_krecon, t1_mean, t1_std, eps=1e-11)
            t2_krecon = normalize(t2_krecon, t2_mean, t2_std, eps=1e-11)

            ### clamp input to ensure training stability.
            t1_in = np.clip(t1_in, -6, 6)
            t1 = np.clip(t1, -6, 6)
            t2_in = np.clip(t2_in, -6, 6)
            t2 = np.clip(t2, -6, 6)

            t1_krecon = np.clip(t1_krecon, -6, 6)
            t2_krecon = np.clip(t2_krecon, -6, 6)

            sample_stats = {"t1_mean": t1_mean, "t1_std": t1_std, "t2_mean": t2_mean, "t2_std": t2_std}

        elif self.input_normalize == "min_max":
            t1_in = (t1_in - t1_in.min())/(t1_in.max() - t1_in.min())
            t1 = (t1 - t1.min())/(t1.max() - t1.min())
            t2_in = (t2_in - t2_in.min())/(t2_in.max() - t2_in.min())
            t2 = (t2 - t2.min())/(t2.max() - t2.min())

        elif self.input_normalize == "divide":
            # Images are already in [0, 1] from the /255.0 load; nothing to do.
            pass

        if True:  # NOTE(review): albumentations branch forced on (was `self.use_kspace`).
            sample = self.albu_transforms(image=t1_in, image2=t1,
                                          image3=t2_in, image4=t2,
                                          image5=t1_krecon, image6=t2_krecon)

            sample = {'image_in': sample['image'].astype(float),
                      'image': sample['image2'].astype(float),
                      'image_krecon': sample['image5'].astype(float),
                      'target_in': sample['image3'].astype(float),
                      'target': sample['image4'].astype(float),
                      'target_krecon': sample['image6'].astype(float)}

        else:
            sample = {'image_in': t1_in.astype(float),
                      'image': t1.astype(float),
                      'image_krecon': t1_krecon.astype(float),
                      'target_in': t2_in.astype(float),
                      'target': t2.astype(float),
                      'target_krecon': t2_krecon.astype(float)}


        if self.transform is not None:
            sample = self.transform(sample)

        return sample, sample_stats
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def add_gaussian_noise(img, mean=0, std=1):
    """Additively corrupt *img* with Gaussian noise, clamped to [0, 1].

    Args:
        img (torch.Tensor): input image in [0, 1].
        mean (float): noise mean.
        std (float): noise standard deviation.

    Returns:
        torch.Tensor: the noisy image, clamped to the valid [0, 1] range.
    """
    perturbed = img + (torch.randn_like(img) * std + mean)
    return torch.clamp(perturbed, 0, 1)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class AddNoise(object):
    """Randomly corrupt the two *input* images of a sample.

    With probability 0.5 (decided independently per image), applies a
    Gaussian blur (kernel 5) followed by small additive Gaussian noise.
    The ground-truth 'image' and 'target' entries pass through unchanged.
    """

    def __call__(self, sample):
        blur = transforms.GaussianBlur(kernel_size=5)
        jitter = transforms.Lambda(lambda x: x + 0.01 * torch.randn_like(x))
        corrupt = transforms.RandomApply([blur, jitter], p=0.5)

        return {'image_in': corrupt(sample['image_in']),
                'image': sample['image'],
                'target_in': corrupt(sample['target_in']),
                'target': sample['target']}
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
class RandomPadCrop(object):
    """Reflect-pad all six images of a sample from 240x240 to 256x256 and
    apply one shared random 240x240 crop, keeping them spatially aligned."""

    def __call__(self, sample):
        new_w, new_h = 256, 256
        crop_size = 240
        pad_size = (256 - 240) // 2

        keys = ('image_in', 'image', 'target_in', 'target',
                'image_krecon', 'target_krecon')
        padded = {k: np.pad(sample[k], pad_size, mode='reflect') for k in keys}

        # Single offset pair, shared by all six images.
        ww = random.randint(0, np.maximum(0, new_w - crop_size))
        hh = random.randint(0, np.maximum(0, new_h - crop_size))

        def _crop(arr):
            return arr[ww:ww + crop_size, hh:hh + crop_size]

        return {'image_in': _crop(padded['image_in']),
                'image': _crop(padded['image']),
                'image_krecon': _crop(padded['image_krecon']),
                'target_in': _crop(padded['target_in']),
                'target': _crop(padded['target']),
                'target_krecon': _crop(padded['target_krecon'])}
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
class RandomResizeCrop(object):
    """Resize the four main images of a sample to 270x270 (bicubic, order=3)
    and take one shared random 256x256 crop, keeping them aligned.

    Note: only 'image_in'/'image'/'target_in'/'target' are processed; any
    krecon entries are dropped by this transform.
    """

    def __call__(self, sample):
        new_w, new_h = 270, 270
        crop_size = 256

        keys = ('image_in', 'image', 'target_in', 'target')
        resized = {k: transform.resize(sample[k], (new_h, new_w), order=3) for k in keys}

        # Single offset pair, shared by all four images.
        ww = random.randint(0, np.maximum(0, new_w - crop_size))
        hh = random.randint(0, np.maximum(0, new_h - crop_size))

        return {k: resized[k][ww:ww + crop_size, hh:hh + crop_size] for k in keys}
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
class RandomFlip(object):
    """With two independent 50% chances, flip the four images of a sample
    horizontally and/or vertically; all images receive the same flips so
    they stay aligned.  Any krecon entries are dropped by this transform."""

    def __call__(self, sample):
        images = [sample['image_in'], sample['image'],
                  sample['target_in'], sample['target']]

        # horizontal flip
        if random.random() < 0.5:
            images = [cv2.flip(im, 1) for im in images]

        # vertical flip
        if random.random() < 0.5:
            images = [cv2.flip(im, 0) for im in images]

        return {'image_in': images[0], 'image': images[1],
                'target_in': images[2], 'target': images[3]}
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
class RandomRotate(object):
    """Rotate the four images of a sample by a random multiple of 90 degrees
    around a common center; all images get the same rotation.

    Any krecon entries are dropped by this transform.
    """

    def __call__(self, sample, center=None, scale=1.0):
        # One angle shared by every image of the sample.
        angle = random.choice([0, 90, 180, 270])

        (h, w) = sample['image'].shape[:2]
        if center is None:
            center = (w // 2, h // 2)
        matrix = cv2.getRotationMatrix2D(center, angle, scale)

        def _rotate(im):
            return cv2.warpAffine(im, matrix, (w, h))

        return {'image_in': _rotate(sample['image_in']),
                'image': _rotate(sample['image']),
                'target_in': _rotate(sample['target_in']),
                'target': _rotate(sample['target'])}
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
class ToTensor(object):
    """Convert the six H x W numpy arrays of a sample into 1 x H x W
    float32 torch tensors, keeping the same keys."""

    def __call__(self, sample):
        def _chw(key):
            # numpy H x W  ->  torch C x H x W with a single channel.
            arr = sample[key][:, :, None].transpose((2, 0, 1))
            return torch.from_numpy(arr).float()

        return {'image_in': _chw('image_in'),
                'image': _chw('image'),
                'target_in': _chw('target_in'),
                'target': _chw('target'),
                'image_krecon': _chw('image_krecon'),
                'target_krecon': _chw('target_krecon')}
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/BRATS_kspace_dataloader.py
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Load the low-quality and high-quality images from the BRATS dataset and transform to kspace.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
from __future__ import print_function, division
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from glob import glob
|
| 10 |
+
import random
|
| 11 |
+
from skimage import transform
|
| 12 |
+
from PIL import Image
|
| 13 |
+
|
| 14 |
+
import cv2
|
| 15 |
+
import os
|
| 16 |
+
import torch
|
| 17 |
+
from torch.utils.data import Dataset
|
| 18 |
+
|
| 19 |
+
from .kspace_subsample import undersample_mri, mri_fft
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def normalize(data, mean, stddev, eps=0.0):
    """Standardize ``data`` with the given statistics.

    Computes (data - mean) / (stddev + eps).

    Args:
        data (torch.Tensor): Input data to be normalized.
        mean (float): Mean value.
        stddev (float): Standard deviation.
        eps (float, default=0.0): Added to stddev to prevent dividing by zero.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    denominator = stddev + eps
    return (data - mean) / denominator
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def normalize_instance(data, eps=0.0):
    """Normalize the given tensor with instance normalization.

    Applies (data - mean) / (stddev + eps), where mean and stddev are
    computed from ``data`` itself.

    Args:
        data (torch.Tensor): Input data to be normalized.
        eps (float): Added to stddev to prevent dividing by zero.

    Returns:
        tuple: (normalized tensor, mean, std).
    """
    mean = data.mean()
    std = data.std()
    normalized = (data - mean) / (std + eps)
    return normalized, mean, std
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def roll(x, shift, dim):
    """Circularly shift ``x`` along ``dim`` (torch analogue of ``np.roll``).

    Args:
        x (torch.Tensor): A PyTorch tensor.
        shift (int or sequence): Amount(s) to roll.
        dim (int or sequence): Dimension(s) to roll along.

    Returns:
        torch.Tensor: Rolled version of x.
    """
    # Multi-axis case: apply one axis at a time, pairwise.
    if isinstance(shift, (tuple, list)):
        assert len(shift) == len(dim)
        for amount, axis in zip(shift, dim):
            x = roll(x, amount, axis)
        return x

    shift = shift % x.size(dim)
    if shift == 0:
        return x

    # Split at the wrap point and swap the two halves.
    split = x.size(dim) - shift
    head = x.narrow(dim, 0, split)
    tail = x.narrow(dim, split, shift)
    return torch.cat((tail, head), dim=dim)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def fftshift(x, dim=None):
    """Move the zero-frequency component to the spectrum center.

    Torch analogue of ``np.fft.fftshift``, built on :func:`roll`.

    Args:
        x (torch.Tensor): A PyTorch tensor.
        dim (int, sequence, or None): Dimension(s) to shift; all by default.

    Returns:
        torch.Tensor: fftshifted version of x.
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [size // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = x.shape[dim] // 2
    else:
        shift = [x.shape[axis] // 2 for axis in dim]
    return roll(x, shift, dim)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def ifftshift(x, dim=None):
    """Inverse of :func:`fftshift` (torch analogue of ``np.fft.ifftshift``).

    Args:
        x (torch.Tensor): A PyTorch tensor.
        dim (int, sequence, or None): Dimension(s) to shift; all by default.

    Returns:
        torch.Tensor: ifftshifted version of x.
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [(size + 1) // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = (x.shape[dim] + 1) // 2
    else:
        shift = [(x.shape[axis] + 1) // 2 for axis in dim]
    return roll(x, shift, dim)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def ifft2c(data: torch.Tensor, norm: str = "ortho") -> torch.Tensor:
    """Apply a centered 2-D inverse FFT.

    The input stores complex values with a trailing size-2 axis
    (real, imag). The spectrum is un-centered with :func:`ifftshift`
    before the transform; note no final fftshift is applied to the result.

    Args:
        data: Tensor of shape (..., H, W, 2).
        norm: FFT normalization mode passed to ``torch.fft.ifftn``.

    Returns:
        The IFFT of the input, again with a trailing (real, imag) axis.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")

    shifted = ifftshift(data, dim=[-3, -2])
    complex_result = torch.fft.ifftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm=norm
    )
    return torch.view_as_real(complex_result)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class Hybrid(Dataset):
    """Paired T1 / under-sampled-T2 BRATS slice dataset.

    Builds path lists for (full T1, full T2, degraded T1, under-sampled T2)
    PNG slices from a fixed cross-validation split, then in ``__getitem__``
    loads the full-resolution pair, optionally normalizes it, and converts
    the T2 slice to (masked) k-space via ``mri_fft`` / ``undersample_mri``.

    NOTE(review): ``splits_path`` is hard-coded to an absolute path — confirm
    it exists in the deployment environment.
    NOTE(review): ``self.t1_images`` is only assigned when split is 'train'
    or 'test'; any other value makes the loop below raise AttributeError.
    """

    def __init__(self, base_dir=None, split='train', MRIDOWN='4X', SNR=15, transform=None, input_normalize=None):
        # base_dir: directory holding the exported PNG slices.
        # MRIDOWN: acceleration tag (e.g. '4X') embedded in filenames.
        # SNR: noise level in dB used in filenames; 0 selects noise-free names.

        super().__init__()
        self._base_dir = base_dir
        self._MRIDOWN = MRIDOWN
        self.im_ids = []
        self.t2_images = []
        self.t1_undermri_images, self.t2_undermri_images = [], []
        self.splits_path = "/data/xiaohan/BRATS_dataset/cv_splits_100patients/"

        if split=='train':
            # Last CSV column holds the slice filename; keep only T1 slices.
            self.train_file = self.splits_path + 'train_data.csv'
            train_images = pd.read_csv(self.train_file).iloc[:, -1].values.tolist()
            self.t1_images = [image for image in train_images if image.split('_')[-1]=='t1.png']

        elif split=='test':
            self.test_file = self.splits_path + 'test_data.csv'
            # self.test_file = self.splits_path + 'train_data.csv'
            test_images = pd.read_csv(self.test_file).iloc[:, -1].values.tolist()
            # test_images = os.listdir(self._base_dir)
            self.t1_images = [image for image in test_images if image.split('_')[-1]=='t1.png']


        # Derive the paired / degraded filenames from each T1 slice name by
        # string substitution on the modality token.
        for image_path in self.t1_images:
            t2_path = image_path.replace('t1', 't2')
            if SNR == 0:
                # t1_under_path = image_path.replace('t1', 't1_' + self._MRIDOWN + '_undermri')
                t1_under_path = image_path
                t2_under_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_undermri')
            else:
                # t1_under_path = image_path.replace('t1', 't1_' + self._MRIDOWN + '_' + str(SNR) + 'dB_undermri')
                t1_under_path = image_path.replace('t1', 't1_' + str(SNR) + 'dB')
                t2_under_path = image_path.replace('t1', 't2_' + self._MRIDOWN + '_' + str(SNR) + 'dB_undermri')

            self.t2_images.append(t2_path)
            self.t1_undermri_images.append(t1_under_path)
            self.t2_undermri_images.append(t2_under_path)

        # print("t1 images:", self.t1_images)
        # print("t2 images:", self.t2_images)
        # print("t1_undermri_images:", self.t1_undermri_images)
        # print("t2_undermri_images:", self.t2_undermri_images)

        self.transform = transform
        self.input_normalize = input_normalize

        # All four path lists must stay index-aligned.
        assert (len(self.t1_images) == len(self.t2_images))
        assert (len(self.t1_images) == len(self.t1_undermri_images))
        assert (len(self.t1_images) == len(self.t2_undermri_images))

        # Display stats
        print('Number of images in {}: {:d}'.format(split, len(self.t1_images)))

    def __len__(self):
        # Dataset length equals the number of T1 slices.
        return len(self.t1_images)


    def __getitem__(self, index):
        """Return (sample dict, normalization stats) for one slice pair."""

        # Load the full-resolution slices; scale 8-bit pixels to [0, 1].
        # t1_in = np.array(Image.open(self._base_dir + self.t1_undermri_images[index]))/255.0
        t1 = np.array(Image.open(self._base_dir + self.t1_images[index]))/255.0
        # t2_in = np.array(Image.open(self._base_dir + self.t2_undermri_images[index]))/255.0
        t2 = np.array(Image.open(self._base_dir + self.t2_images[index]))/255.0
        # print("images:", t1_in.shape, t1.shape, t2_in.shape, t2.shape)
        # print("t1 before standardization:", t1.max(), t1.min(), t1.mean())
        # print("t1 range:", t1.max(), t1.min())
        # print("t2 range:", t2.max(), t2 .min())

        if self.input_normalize == "mean_std":
            ### Apply (x - mean) / std normalization to both input and target images.
            t1, t1_mean, t1_std = normalize_instance(t1, eps=1e-11)
            t2, t2_mean, t2_std = normalize_instance(t2, eps=1e-11)

            ### clamp input to ensure training stability.
            t1 = np.clip(t1, -6, 6)
            t2 = np.clip(t2, -6, 6)
            # print("t1 after standardization:", t1.max(), t1.min(), t1.mean())

            sample_stats = {"t1_mean": t1_mean, "t1_std": t1_std, "t2_mean": t2_mean, "t2_std": t2_std}

        elif self.input_normalize == "min_max":
            # t1 = (t1 - t1.min())/(t1.max() - t1.min())
            # t2 = (t2 - t2.min())/(t2.max() - t2.min())
            t1 = t1/t1.max()
            t2 = t2/t2.max()
            sample_stats = 0

        elif self.input_normalize == "divide":
            # NOTE(review): images are already divided by 255 above; no stats.
            # If input_normalize matches none of these branches,
            # sample_stats is undefined and the return raises — confirm intended.
            sample_stats = 0


        ### convert images to kspace and perform undersampling.
        # t1_kspace, t1_masked_kspace, t1_img, t1_under_img = undersample_mri(t1, _MRIDOWN = None)
        t1_kspace, t1_img = mri_fft(t1)
        t2_kspace, t2_masked_kspace, t2_img, t2_under_img, mask = undersample_mri(t2, _MRIDOWN = self._MRIDOWN)


        sample = {'t1': t1_img, 't2': t2_img, 'under_t2': t2_under_img, "t2_mask": mask, \
                  't1_kspace': t1_kspace, 't2_kspace': t2_kspace, 't2_masked_kspace': t2_masked_kspace}

        if self.transform is not None:
            sample = self.transform(sample)

        return sample, sample_stats
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
class ToTensor(object):
    """Convert the ndarray pair in a sample to C x H x W float32 tensors.

    'image' is returned under the key 'ct' and 'target' under 'mri'.
    """

    def __call__(self, sample):
        converted = {}
        for src_key, dst_key in (('image', 'ct'), ('target', 'mri')):
            # numpy image: H x W  ->  torch image: 1 x H x W
            chw = sample[src_key][:, :, None].transpose((2, 0, 1))
            converted[dst_key] = torch.from_numpy(chw).float()
        return converted
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
# class ToTensor(object):
|
| 278 |
+
# """Convert ndarrays in sample to Tensors."""
|
| 279 |
+
|
| 280 |
+
# def __call__(self, sample):
|
| 281 |
+
# # swap color axis because
|
| 282 |
+
# # numpy image: H x W x C
|
| 283 |
+
# # torch image: C X H X W
|
| 284 |
+
# img_in = sample['image_in'][:, :, None].transpose((2, 0, 1))
|
| 285 |
+
# img = sample['image'][:, :, None].transpose((2, 0, 1))
|
| 286 |
+
# target_in = sample['target_in'][:, :, None].transpose((2, 0, 1))
|
| 287 |
+
# target = sample['target'][:, :, None].transpose((2, 0, 1))
|
| 288 |
+
# # print("img_in before_numpy range:", img_in.max(), img_in.min())
|
| 289 |
+
# img_in = torch.from_numpy(img_in).float()
|
| 290 |
+
# img = torch.from_numpy(img).float()
|
| 291 |
+
# target_in = torch.from_numpy(target_in).float()
|
| 292 |
+
# target = torch.from_numpy(target).float()
|
| 293 |
+
# # print("img_in range:", img_in.max(), img_in.min())
|
| 294 |
+
|
| 295 |
+
# return {'ct_in': img_in,
|
| 296 |
+
# 'ct': img,
|
| 297 |
+
# 'mri_in': target_in,
|
| 298 |
+
# 'mri': target}
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__init__.py
ADDED
|
File without changes
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (174 Bytes). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/albu_transform.cpython-310.pyc
ADDED
|
Binary file (1.57 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/fastmri.cpython-310.pyc
ADDED
|
Binary file (8.08 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/kspace_subsample.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/m4_utils.cpython-310.pyc
ADDED
|
Binary file (6.68 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/m4raw_dataloader.cpython-310.pyc
ADDED
|
Binary file (14.4 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/m4raw_std_dataloader.cpython-310.pyc
ADDED
|
Binary file (14.5 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/math.cpython-310.pyc
ADDED
|
Binary file (6.68 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/subsample.cpython-310.pyc
ADDED
|
Binary file (7.54 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/__pycache__/transforms.cpython-310.pyc
ADDED
|
Binary file (12.9 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/albu_transform.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- encoding: utf-8 -*-
|
| 2 |
+
#Time :2022/02/24 18:14:15
|
| 3 |
+
#Author :Hao Chen
|
| 4 |
+
#FileName :trans_lib.py
|
| 5 |
+
#Version :2.0
|
| 6 |
+
|
| 7 |
+
import cv2
|
| 8 |
+
import torch
|
| 9 |
+
import numpy as np
|
| 10 |
+
import albumentations as A
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def get_albu_transforms(type="train", img_size=(192, 192)):
    """Build the albumentations pipeline for one data split.

    Training adds geometric augmentation (shift/scale/rotate plus one of
    grid distortion or elastic deformation) before the final resize; other
    splits only resize. Extra image/mask targets are registered so the same
    spatial transform is applied to every modality in a sample.
    """
    resize = A.Resize(img_size[0], img_size[1])

    if type == 'train':
        augmentations = [
            # A.VerticalFlip(p=0.5),
            # A.HorizontalFlip(p=0.5),
            A.ShiftScaleRotate(shift_limit=0.2, scale_limit=(-0.2, 0.2),
                               rotate_limit=5, p=0.5),
            A.OneOf([
                A.GridDistortion(num_steps=1, distort_limit=0.3, p=1.0),
                A.ElasticTransform(alpha=2, sigma=5, p=1.0)
            ], p=0.5),
            resize,
        ]
    else:
        augmentations = [resize]

    extra_targets = {'image2': 'image',
                     'image3': 'image',
                     'image4': 'image',
                     'image5': 'image',
                     'image6': 'image',
                     "mask2": "mask"}
    return A.Compose(augmentations, p=1.0, additional_targets=extra_targets)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# Beta function
|
| 42 |
+
def gamma_concern(img, gamma):
    """Stretch contrast about the image mean by ``gamma``, clipped to [0, 1]."""
    center = torch.mean(img)
    stretched = (img - center) * gamma + center
    return torch.clip(stretched, 0, 1)
|
| 50 |
+
|
| 51 |
+
def gamma_power(img, gamma, direction=0):
    """Power-law (gamma) correction, rescaled so the maximum is 1.

    With ``direction == 1`` the curve is applied to the inverted image and
    the result is inverted back.
    """
    inverted = direction == 1
    if inverted:
        img = 1 - img

    img = torch.pow(img, gamma)
    img = img / torch.max(img)

    return 1 - img if inverted else img
|
| 61 |
+
|
| 62 |
+
def gamma_exp(img, gamma, direction=0):
    """Exponential tone mapping, rescaled so the maximum is 1.

    With ``direction == 1`` the mapping is applied to the inverted image
    and the result is inverted back.
    """
    if direction == 1:
        img = 1 - img

    mapped = torch.exp(img * gamma)
    mapped = mapped / torch.max(mapped)

    if direction == 1:
        mapped = 1 - mapped
    return mapped
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/brats_4X_mask.npy
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:76341ba680a0bc9c80389e01f8511e5bd99ab361eeb48d83516904b84cccc518
|
| 3 |
+
size 460928
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/brats_8X_mask.npy
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0c5160add079e8f4dc2496e5ef87c110015026d9f6116329da2238a73d8bc104
|
| 3 |
+
size 230528
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/brats_data_gen.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Xiaohan Xing, 2023/04/08
|
| 3 |
+
对BRATS 2020数据集进行Pre-processing, 得到各个模态的under-sampled input image和2d groung-truth.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
import argparse
|
| 7 |
+
import numpy as np
|
| 8 |
+
import nibabel as nib
|
| 9 |
+
from scipy import ndimage as nd
|
| 10 |
+
from scipy import ndimage
|
| 11 |
+
from skimage import filters
|
| 12 |
+
from skimage import io
|
| 13 |
+
import torch
|
| 14 |
+
import torch.fft
|
| 15 |
+
from matplotlib import pyplot as plt
|
| 16 |
+
|
| 17 |
+
MRIDOWN=2
|
| 18 |
+
SNR = 35
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class MaskFunc_Cartesian:
    """Cartesian k-space under-sampling mask generator.

    A generated mask keeps a fully-sampled band of N * center_fraction
    low-frequency columns and samples each remaining column independently
    with probability chosen so the expected total equals N / acceleration.
    When several (center_fraction, acceleration) pairs are configured, one
    pair is drawn uniformly at random per call.
    """

    def __init__(self, center_fractions, accelerations):
        """
        Args:
            center_fractions (List[float]): Fraction of low-frequency columns
                to always retain; one entry is picked per call.
            accelerations (List[int]): Under-sampling factors, same length as
                ``center_fractions``; an acceleration of 4 keeps ~25% of
                columns, not necessarily evenly spaced.
        """
        if len(center_fractions) != len(accelerations):
            raise ValueError('Number of center fractions should match number of accelerations')

        self.center_fractions = center_fractions
        self.accelerations = accelerations
        self.rng = np.random.RandomState()

    def __call__(self, shape, seed=None):
        """
        Args:
            shape (iterable[int]): Mask shape, at least 3-D; columns are drawn
                along the second-to-last dimension.
            seed (int, optional): Seeding the RNG makes the mask reproducible
                for a given shape.

        Returns:
            torch.Tensor: A float32 mask of the requested shape.
        """
        if len(shape) < 3:
            raise ValueError('Shape should have 3 or more dimensions')

        self.rng.seed(seed)
        num_cols = shape[-2]

        # Draw one (center_fraction, acceleration) configuration.
        idx = self.rng.randint(0, len(self.accelerations))
        center_fraction = self.center_fractions[idx]
        acceleration = self.accelerations[idx]

        # Random columns plus a guaranteed fully-sampled center band.
        num_low_freqs = int(round(num_cols * center_fraction))
        prob = (num_cols / acceleration - num_low_freqs) / (num_cols - num_low_freqs + 1e-10)
        mask = self.rng.uniform(size=num_cols) < prob
        pad = (num_cols - num_low_freqs + 1) // 2
        mask[pad:pad + num_low_freqs] = True

        # Reshape to broadcast over every axis except the column axis.
        broadcast_shape = [1] * len(shape)
        broadcast_shape[-2] = num_cols
        mask = torch.from_numpy(mask.reshape(*broadcast_shape).astype(np.float32))
        return mask.repeat(shape[0], 1, 1)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
## mri related
|
| 94 |
+
def mri_fourier_transform_2d(image, mask):
    '''
    Forward 2-D FFT over the spatial axes with a center-shifted spectrum,
    followed by column-mask under-sampling of k-space.

    image: input tensor [B, H, W, C]
    mask: mask tensor [H, W]
    '''
    # FFT over H, W; shift moves the zero-frequency bin to the center.
    spectrum = torch.fft.fftshift(torch.fft.fftn(image, dim=(1, 2)), dim=(1, 2))
    # Zero out the k-space locations excluded by the sampling mask.
    return spectrum * mask[None, :, :, None]
|
| 105 |
+
|
| 106 |
+
## mri related
|
| 107 |
+
def mri_inver_fourier_transform_2d(spectrum):
    '''
    Inverse of ``mri_fourier_transform_2d``: undo the center shift, then
    apply the 2-D inverse FFT over the spatial axes.

    spectrum: input tensor [B, H, W, C]
    '''
    unshifted = torch.fft.ifftshift(spectrum, dim=(1, 2))
    return torch.fft.ifftn(unshifted, dim=(1, 2))
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def simulate_undersample_mri(raw_mri):
    """Simulate noisy Cartesian under-sampling of one 2-D MRI slice.

    Pipeline: slice -> masked k-space (module-level MRIDOWN acceleration,
    fixed mask seed 1337) -> additive complex Gaussian noise -> inverse FFT
    -> magnitude image.

    Args:
        raw_mri: 2-D array-like slice; assumes 240x240 — the mask shape is
            hard-coded below (TODO confirm callers always crop to 240x240).

    Returns:
        tuple: (under-sampled magnitude image as a 2-D ndarray,
                noisy masked k-space tensor [1, H, W, 1]).
    """
    # Add batch and channel axes: H x W -> 1 x H x W x 1.
    mri = torch.tensor(raw_mri)[None, :, :, None].to(torch.float32)
    ff = MaskFunc_Cartesian([0.2], [MRIDOWN]) ## 0.2 for MRIDOWN=2, 0.1 for MRIDOWN=4
    shape = [240, 240, 1]
    # Fixed seed -> identical sampling mask for every slice.
    mask = ff(shape, seed=1337)
    mask = mask[:, :, 0]
    # print("original MRI:", mri)

    # print("original MRI:", mri.shape)
    kspace = mri_fourier_transform_2d(mri, mask)
    kspace = add_gaussian_noise(kspace)
    mri_recon = mri_inver_fourier_transform_2d(kspace)
    # NOTE(review): kdata (k-space magnitude) is computed but never used or
    # returned — dead code unless kept for debugging.
    kdata = torch.sqrt(kspace.real ** 2 + kspace.imag ** 2 + 1e-10)
    kdata = kdata.data.numpy()[0, :, :, 0]

    # Magnitude of the complex reconstruction, stripped back to H x W.
    under_img = torch.sqrt(mri_recon.real ** 2 + mri_recon.imag ** 2)
    under_img = under_img.data.numpy()[0, :, :, 0]

    return under_img, kspace
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def add_gaussian_noise(img, snr=15):
    """Add complex white Gaussian noise scaled to a target SNR (in dB).

    The noise power for the real and imaginary components is derived
    independently from the mean power of the corresponding component.

    Args:
        img (torch.Tensor): Complex tensor of shape [B, H, W, C].
        snr (float): Target signal-to-noise ratio in decibels.

    Returns:
        torch.Tensor: ``img`` plus complex Gaussian noise, same shape.
    """
    num_pixels = img.shape[0] * img.shape[1] * img.shape[2] * img.shape[3]
    snr_linear = np.power(10, snr / 10)

    # Real component: mean signal power -> noise power -> scaled white noise.
    real_power = torch.sum(torch.abs(img.real) ** 2) / num_pixels
    noise_real = torch.randn_like(img.real) * np.sqrt(real_power / snr_linear)

    # Imaginary component, handled the same way.
    imag_power = torch.sum(torch.abs(img.imag) ** 2) / num_pixels
    noise_imag = torch.randn_like(img.imag) * np.sqrt(imag_power / snr_linear)

    return img + (noise_real + 1j * noise_imag)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def complexsing_addnoise(img, snr):
    """Add complex Gaussian noise to a complex tensor at the given SNR (dB).

    Noise power for the real and imaginary components is computed
    independently from the mean power of each component (numpy RNG,
    so not seeded by torch).

    Args:
        img (torch.Tensor): Complex-valued tensor of shape [B, H, W, C].
        snr (float): Target signal-to-noise ratio in decibels.

    Returns:
        torch.Tensor: ``img`` plus complex Gaussian noise, same shape.
    """
    img_numpy = img.cpu().numpy()
    snr_linear = np.power(10, snr / 10)

    ### noise for the real part of the image, scaled from its mean power.
    s_r = np.real(img_numpy)
    num_pixels = s_r.shape[0] * s_r.shape[1] * s_r.shape[2] * s_r.shape[3]
    psr = np.sum(np.abs(s_r) ** 2) / num_pixels
    pnr = psr / snr_linear
    # BUG FIX: draw noise with the image's shape. The previous flat
    # (num_pixels,) vector either failed to broadcast against the 4-D image
    # or silently broadcast along the wrong (last) axis.
    noise_r = np.random.randn(*s_r.shape) * np.sqrt(pnr)

    ### noise for the imaginary part of the image.
    s_im = np.imag(img_numpy)
    psim = np.sum(np.abs(s_im) ** 2) / num_pixels
    pnim = psim / snr_linear
    noise_im = np.random.randn(*s_im.shape) * np.sqrt(pnim)

    noise = torch.Tensor(noise_r) + 1j * torch.Tensor(noise_im)
    return img + noise
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def _parse(rootdir):
    """Index a BraTS-style directory tree as {subject: {modality: filename}}.

    Each subdirectory of ``rootdir`` is treated as one subject; the modality
    is the last underscore-separated token of each filename stem
    (e.g. 'BraTS20_x_t1.nii' -> 't1').

    Args:
        rootdir (str): Root directory containing one folder per subject.

    Returns:
        dict: Mapping subject name -> {modality: filename}.
    """
    filetree = {}

    for sample_file in os.listdir(rootdir):
        # BUG FIX: join paths properly; plain concatenation produced a wrong
        # path whenever rootdir lacked a trailing separator.
        sample_dir = os.path.join(rootdir, sample_file)
        subject = sample_file

        for filename in os.listdir(sample_dir):
            modality = filename.split('.').pop(0).split('_')[-1]

            if subject not in filetree:
                filetree[subject] = {}
            filetree[subject][modality] = filename

    return filetree
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def clean(rootdir, savedir, source_modality, target_modality):
    """Pre-process paired BraTS volumes into under-sampled 2-D PNG slices.

    For each subject having both modalities: background is masked out per
    slice (Li threshold + hole filling), background voxels are set to the
    volume minimum, high-resolution volumes are rescaled to 240x240
    in-plane, volumes are min-max normalized, and every slice with
    foreground in both modalities is center-cropped to 240x240, run through
    ``simulate_undersample_mri`` and saved as an 8-bit PNG.

    Args:
        rootdir (str): Directory with one sub-folder of NIfTI files per subject.
        savedir (str): Output directory for the generated PNGs.
        source_modality (str): Source modality token, e.g. 't1'.
        target_modality (str): Target modality token, e.g. 't2'.

    NOTE(review): output is written to savedir + '/img_temp/' but only
    '/img_norm' is created below — confirm img_temp exists beforehand.
    NOTE(review): ``ndimage.morphology`` is a deprecated SciPy namespace;
    modern SciPy exposes ``ndimage.binary_fill_holes`` directly.
    """
    filetree = _parse(rootdir)
    print("filetree:", filetree)

    if not os.path.exists(savedir+'/img_norm'):
        os.makedirs(savedir+'/img_norm')

    for subject, modalities in filetree.items():
        print(f'{subject}:')

        # Skip subjects missing either modality.
        if source_modality not in modalities or target_modality not in modalities:
            print('-> incomplete')
            continue

        source_path = os.path.join(rootdir, subject, modalities[source_modality])
        target_path = os.path.join(rootdir, subject, modalities[target_modality])

        source_image = nib.load(source_path)
        target_image = nib.load(target_path)

        source_volume = source_image.get_fdata()
        target_volume = target_image.get_fdata()
        source_binary_volume = np.zeros_like(source_volume)
        target_binary_volume = np.zeros_like(target_volume)

        print("source volume:", source_volume.shape)
        print("target volume:", target_volume.shape)

        # Build a per-slice foreground mask for each modality.
        for i in range(source_binary_volume.shape[-1]):
            source_slice = source_volume[:, :, i]
            target_slice = target_volume[:, :, i]

            # A constant slice has no information; threshold_li would fail.
            if source_slice.min() == source_slice.max():
                print("invalide source slice")
                source_binary_volume[:, :, i] = np.zeros_like(source_slice)
            else:
                source_binary_volume[:, :, i] = ndimage.morphology.binary_fill_holes(
                    source_slice > filters.threshold_li(source_slice))

            if target_slice.min() == target_slice.max():
                print("invalide target slice")
                target_binary_volume[:, :, i] = np.zeros_like(target_slice)
            else:
                target_binary_volume[:, :, i] = ndimage.morphology.binary_fill_holes(
                    target_slice > filters.threshold_li(target_slice))

        # Force background voxels to the volume minimum.
        source_volume = np.where(source_binary_volume, source_volume, np.ones_like(
            source_volume) * source_volume.min())
        target_volume = np.where(target_binary_volume, target_volume, np.ones_like(
            target_volume) * target_volume.min())

        ## resize: sub-0.6mm in-plane resolution volumes are rescaled to 240x240.
        if source_image.header.get_zooms()[0] < 0.6:
            scale = np.asarray([240, 240, source_volume.shape[-1]]) / np.asarray(source_volume.shape)
            source_volume = nd.zoom(source_volume, zoom=scale, order=3, prefilter=False)
            target_volume = nd.zoom(target_volume, zoom=scale, order=0, prefilter=False)

        # save volume into images: min-max normalize to [0, 1] first.
        source_volume = (source_volume-source_volume.min())/(source_volume.max()-source_volume.min())
        target_volume = (target_volume-target_volume.min())/(target_volume.max()-target_volume.min())

        for i in range(source_binary_volume.shape[-1]):
            source_binary_slice = source_binary_volume[:, :, i]
            target_binary_slice = target_binary_volume[:, :, i]
            # Keep only slices with foreground in BOTH modalities.
            if source_binary_slice.max() > 0 and target_binary_slice.max() > 0:
                # Center-crop to 240x240 around the volume midpoint.
                dd = target_volume.shape[0] // 2
                target_slice = target_volume[dd - 120:dd + 120, dd - 120:dd + 120, i]
                source_slice = source_volume[dd - 120:dd + 120, dd - 120:dd + 120, i]
                print("source slice range:", source_slice.shape)
                print("target slice range:", target_slice.max(), target_slice.min())
                # undersample MRI
                # NOTE(review): the target-modality result is computed but
                # never saved (all target saves are commented out below).
                source_under_img, source_kspace = simulate_undersample_mri(source_slice)
                target_under_img, target_kspace = simulate_undersample_mri(target_slice)

                # # io.imsave(savedir+'/img_norm/'+subject+'_'+str(i)+'_'+source_modality+'.png', (source_slice * 255.0).astype(np.uint8))
                io.imsave(savedir + '/img_temp/' + subject + '_' + str(i) + '_' + source_modality + '_' + str(MRIDOWN) + 'X_' + str(SNR) + 'dB_undermri.png',
                          (source_under_img * 255.0).astype(np.uint8))

                # io.imsave(savedir + '/img_temp/' + subject + '_' + str(i) + '_' + source_modality + '_' + str(MRIDOWN) + 'X_undermri.png',
                #           (source_under_img * 255.0).astype(np.uint8))
                # # io.imsave(savedir+'/img_norm/'+subject+'_'+str(i)+'_'+target_modality+'.png', (target_slice * 255.0).astype(np.uint8))
                # io.imsave(savedir + '/img_norm/' + subject + '_' + str(i) + '_' + target_modality + '_' + str(MRIDOWN) + 'X_undermri.png',
                #           (target_under_img * 255.0).astype(np.uint8))

                # np.savez_compressed(rootdir + '/img_norm/' + subject + '_' + str(i) + '_' + target_modality + '_raw_'+str(MRIDOWN)+'X'+str(CTNVIEW)+'P',
                #                     kspace=kspace, under_t1=under_img,
                #                     t1=source_slice, ct=target_slice)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def main(args):
|
| 292 |
+
clean(args.rootdir,args.savedir, args.source, args.target)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
if __name__ == '__main__':
|
| 296 |
+
parser = argparse.ArgumentParser()
|
| 297 |
+
parser.add_argument('--rootdir', type=str, default='/home/xiaohan/datasets/BRATS_dataset/BRATS_2020/')
|
| 298 |
+
parser.add_argument('--savedir', type=str, default='/home/xiaohan/datasets/BRATS_dataset/BRATS_2020_images/')
|
| 299 |
+
parser.add_argument('--source', default='t1')
|
| 300 |
+
parser.add_argument('--target', default='t2')
|
| 301 |
+
|
| 302 |
+
main(parser.parse_args())
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/kspace_4_mask.npy
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f68ba364235a51534884b434ac3a1c16d0cf263b9e4c08c5b3757214a6f78216
|
| 3 |
+
size 2048128
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/kspace_8_mask.npy
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5f7397d527311ac6ba09ee2621d2f964e276a16b4bf0aaded163653abef882bb
|
| 3 |
+
size 2048128
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/example_mask/m4raw_4_mask.npy
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ee522a2b8e4afa7a3349c2729effacabd9e4502be601bb176200892bded99e7f
|
| 3 |
+
size 6912128
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/fastmri.py
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import csv
|
| 2 |
+
import os
|
| 3 |
+
import random
|
| 4 |
+
import xml.etree.ElementTree as etree
|
| 5 |
+
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
|
| 6 |
+
import pathlib
|
| 7 |
+
|
| 8 |
+
import h5py
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import yaml
|
| 12 |
+
from torch.utils.data import Dataset
|
| 13 |
+
from .transforms import build_transforms
|
| 14 |
+
from matplotlib import pyplot as plt
|
| 15 |
+
|
| 16 |
+
from .albu_transform import get_albu_transforms
|
| 17 |
+
|
| 18 |
+
def fetch_dir(key, data_config_file=pathlib.Path("fastmri_dirs.yaml")):
|
| 19 |
+
"""
|
| 20 |
+
Data directory fetcher.
|
| 21 |
+
|
| 22 |
+
This is a brute-force simple way to configure data directories for a
|
| 23 |
+
project. Simply overwrite the variables for `knee_path` and `brain_path`
|
| 24 |
+
and this function will retrieve the requested subsplit of the data for use.
|
| 25 |
+
|
| 26 |
+
Args:
|
| 27 |
+
key (str): key to retrieve path from data_config_file.
|
| 28 |
+
data_config_file (pathlib.Path,
|
| 29 |
+
default=pathlib.Path("fastmri_dirs.yaml")): Default path config
|
| 30 |
+
file.
|
| 31 |
+
|
| 32 |
+
Returns:
|
| 33 |
+
pathlib.Path: The path to the specified directory.
|
| 34 |
+
"""
|
| 35 |
+
if not data_config_file.is_file():
|
| 36 |
+
default_config = dict(
|
| 37 |
+
knee_path="/home/jc3/Data/",
|
| 38 |
+
brain_path="/home/jc3/Data/",
|
| 39 |
+
)
|
| 40 |
+
with open(data_config_file, "w") as f:
|
| 41 |
+
yaml.dump(default_config, f)
|
| 42 |
+
|
| 43 |
+
raise ValueError(f"Please populate {data_config_file} with directory paths.")
|
| 44 |
+
|
| 45 |
+
with open(data_config_file, "r") as f:
|
| 46 |
+
data_dir = yaml.safe_load(f)[key]
|
| 47 |
+
|
| 48 |
+
data_dir = pathlib.Path(data_dir)
|
| 49 |
+
|
| 50 |
+
if not data_dir.exists():
|
| 51 |
+
raise ValueError(f"Path {data_dir} from {data_config_file} does not exist.")
|
| 52 |
+
|
| 53 |
+
return data_dir
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def et_query(
|
| 57 |
+
root: etree.Element,
|
| 58 |
+
qlist: Sequence[str],
|
| 59 |
+
namespace: str = "http://www.ismrm.org/ISMRMRD",
|
| 60 |
+
) -> str:
|
| 61 |
+
"""
|
| 62 |
+
ElementTree query function.
|
| 63 |
+
This can be used to query an xml document via ElementTree. It uses qlist
|
| 64 |
+
for nested queries.
|
| 65 |
+
Args:
|
| 66 |
+
root: Root of the xml to search through.
|
| 67 |
+
qlist: A list of strings for nested searches, e.g. ["Encoding",
|
| 68 |
+
"matrixSize"]
|
| 69 |
+
namespace: Optional; xml namespace to prepend query.
|
| 70 |
+
Returns:
|
| 71 |
+
The retrieved data as a string.
|
| 72 |
+
"""
|
| 73 |
+
s = "."
|
| 74 |
+
prefix = "ismrmrd_namespace"
|
| 75 |
+
|
| 76 |
+
ns = {prefix: namespace}
|
| 77 |
+
|
| 78 |
+
for el in qlist:
|
| 79 |
+
s = s + f"//{prefix}:{el}"
|
| 80 |
+
|
| 81 |
+
value = root.find(s, ns)
|
| 82 |
+
if value is None:
|
| 83 |
+
raise RuntimeError("Element not found")
|
| 84 |
+
|
| 85 |
+
return str(value.text)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class SliceDataset(Dataset):
|
| 89 |
+
def __init__(
|
| 90 |
+
self,
|
| 91 |
+
root,
|
| 92 |
+
transform,
|
| 93 |
+
challenge,
|
| 94 |
+
sample_rate=1,
|
| 95 |
+
mode='train'
|
| 96 |
+
):
|
| 97 |
+
self.mode = mode
|
| 98 |
+
self.albu_transforms = get_albu_transforms(self.mode, (320, 320))
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
# challenge
|
| 102 |
+
if challenge not in ("singlecoil", "multicoil"):
|
| 103 |
+
raise ValueError('challenge should be either "singlecoil" or "multicoil"')
|
| 104 |
+
self.recons_key = (
|
| 105 |
+
"reconstruction_esc" if challenge == "singlecoil" else "reconstruction_rss"
|
| 106 |
+
)
|
| 107 |
+
# transform
|
| 108 |
+
self.transform = transform
|
| 109 |
+
|
| 110 |
+
self.examples = []
|
| 111 |
+
|
| 112 |
+
self.cur_path = root
|
| 113 |
+
if not os.path.exists(self.cur_path):
|
| 114 |
+
self.cur_path = self.cur_path + "_selected"
|
| 115 |
+
|
| 116 |
+
self.csv_file = "knee_data_split/singlecoil_" + self.mode + "_split_less.csv"
|
| 117 |
+
|
| 118 |
+
with open(self.csv_file, 'r') as f:
|
| 119 |
+
reader = csv.reader(f)
|
| 120 |
+
|
| 121 |
+
id = 0
|
| 122 |
+
|
| 123 |
+
for row in reader:
|
| 124 |
+
pd_metadata, pd_num_slices = self._retrieve_metadata(os.path.join(self.cur_path, row[0] + '.h5'))
|
| 125 |
+
|
| 126 |
+
pdfs_metadata, pdfs_num_slices = self._retrieve_metadata(os.path.join(self.cur_path, row[1] + '.h5'))
|
| 127 |
+
|
| 128 |
+
for slice_id in range(min(pd_num_slices, pdfs_num_slices)):
|
| 129 |
+
self.examples.append(
|
| 130 |
+
(os.path.join(self.cur_path, row[0] + '.h5'), os.path.join(self.cur_path, row[1] + '.h5')
|
| 131 |
+
, slice_id, pd_metadata, pdfs_metadata, id))
|
| 132 |
+
id += 1
|
| 133 |
+
|
| 134 |
+
if sample_rate < 1:
|
| 135 |
+
random.shuffle(self.examples)
|
| 136 |
+
num_examples = round(len(self.examples) * sample_rate)
|
| 137 |
+
|
| 138 |
+
self.examples = self.examples[0:num_examples]
|
| 139 |
+
|
| 140 |
+
def __len__(self):
|
| 141 |
+
return len(self.examples)
|
| 142 |
+
|
| 143 |
+
def __getitem__(self, i):
|
| 144 |
+
|
| 145 |
+
# read pd
|
| 146 |
+
pd_fname, pdfs_fname, slice, pd_metadata, pdfs_metadata, id = self.examples[i]
|
| 147 |
+
|
| 148 |
+
with h5py.File(pd_fname, "r") as hf:
|
| 149 |
+
pd_kspace = hf["kspace"][slice]
|
| 150 |
+
|
| 151 |
+
pd_mask = np.asarray(hf["mask"]) if "mask" in hf else None
|
| 152 |
+
|
| 153 |
+
pd_target = hf[self.recons_key][slice] if self.recons_key in hf else None
|
| 154 |
+
|
| 155 |
+
attrs = dict(hf.attrs)
|
| 156 |
+
|
| 157 |
+
attrs.update(pd_metadata)
|
| 158 |
+
|
| 159 |
+
if self.transform is None:
|
| 160 |
+
pd_sample = (pd_kspace, pd_mask, pd_target, attrs, pd_fname, slice)
|
| 161 |
+
else:
|
| 162 |
+
pd_sample = self.transform(pd_kspace, pd_mask, pd_target, attrs, pd_fname, slice)
|
| 163 |
+
|
| 164 |
+
with h5py.File(pdfs_fname, "r") as hf:
|
| 165 |
+
pdfs_kspace = hf["kspace"][slice]
|
| 166 |
+
pdfs_mask = np.asarray(hf["mask"]) if "mask" in hf else None
|
| 167 |
+
|
| 168 |
+
pdfs_target = hf[self.recons_key][slice] if self.recons_key in hf else None
|
| 169 |
+
|
| 170 |
+
attrs = dict(hf.attrs)
|
| 171 |
+
|
| 172 |
+
attrs.update(pdfs_metadata)
|
| 173 |
+
|
| 174 |
+
if self.transform is None:
|
| 175 |
+
pdfs_sample = (pdfs_kspace, pdfs_mask, pdfs_target, attrs, pdfs_fname, slice)
|
| 176 |
+
else:
|
| 177 |
+
pdfs_sample = self.transform(pdfs_kspace, pdfs_mask, pdfs_target, attrs, pdfs_fname, slice)
|
| 178 |
+
|
| 179 |
+
# 0: input, 1: target, 2: mean, 3: std
|
| 180 |
+
sample = self.albu_transforms(image=pdfs_sample[1].numpy(),
|
| 181 |
+
image2=pd_sample[1].numpy(),
|
| 182 |
+
image3=pdfs_sample[0].numpy(),
|
| 183 |
+
image4=pd_sample[0].numpy())
|
| 184 |
+
|
| 185 |
+
pdfs_sample = list(pdfs_sample)
|
| 186 |
+
pd_sample = list(pd_sample)
|
| 187 |
+
pdfs_sample[1] = sample['image']
|
| 188 |
+
pd_sample[1] = sample['image2']
|
| 189 |
+
pdfs_sample[0] = sample['image3']
|
| 190 |
+
pd_sample[0] = sample['image4']
|
| 191 |
+
|
| 192 |
+
# dataset pdf mean and std tensor(3.1980e-05) tensor(1.3093e-05)
|
| 193 |
+
# print("dataset pdf mean and std", pdfs_sample[2], pdfs_sample[3])
|
| 194 |
+
# print(pdfs_sample[1].shape, pdfs_sample[1].min(), pdfs_sample[1].max())
|
| 195 |
+
|
| 196 |
+
return (pd_sample, pdfs_sample, id)
|
| 197 |
+
|
| 198 |
+
def _retrieve_metadata(self, fname):
|
| 199 |
+
with h5py.File(fname, "r") as hf:
|
| 200 |
+
et_root = etree.fromstring(hf["ismrmrd_header"][()])
|
| 201 |
+
|
| 202 |
+
enc = ["encoding", "encodedSpace", "matrixSize"]
|
| 203 |
+
enc_size = (
|
| 204 |
+
int(et_query(et_root, enc + ["x"])),
|
| 205 |
+
int(et_query(et_root, enc + ["y"])),
|
| 206 |
+
int(et_query(et_root, enc + ["z"])),
|
| 207 |
+
)
|
| 208 |
+
rec = ["encoding", "reconSpace", "matrixSize"]
|
| 209 |
+
recon_size = (
|
| 210 |
+
int(et_query(et_root, rec + ["x"])),
|
| 211 |
+
int(et_query(et_root, rec + ["y"])),
|
| 212 |
+
int(et_query(et_root, rec + ["z"])),
|
| 213 |
+
)
|
| 214 |
+
|
| 215 |
+
lims = ["encoding", "encodingLimits", "kspace_encoding_step_1"]
|
| 216 |
+
enc_limits_center = int(et_query(et_root, lims + ["center"]))
|
| 217 |
+
enc_limits_max = int(et_query(et_root, lims + ["maximum"])) + 1
|
| 218 |
+
|
| 219 |
+
padding_left = enc_size[1] // 2 - enc_limits_center
|
| 220 |
+
padding_right = padding_left + enc_limits_max
|
| 221 |
+
|
| 222 |
+
num_slices = hf["kspace"].shape[0]
|
| 223 |
+
|
| 224 |
+
metadata = {
|
| 225 |
+
"padding_left": padding_left,
|
| 226 |
+
"padding_right": padding_right,
|
| 227 |
+
"encoding_size": enc_size,
|
| 228 |
+
"recon_size": recon_size,
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
return metadata, num_slices
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def build_dataset(args, mode='train', sample_rate=1, use_kspace=False):
|
| 235 |
+
assert mode in ['train', 'val', 'test'], 'unknown mode'
|
| 236 |
+
transforms = build_transforms(args, mode, use_kspace)
|
| 237 |
+
|
| 238 |
+
return SliceDataset(os.path.join(args.root_path, 'singlecoil_' + mode), transforms, 'singlecoil', sample_rate=sample_rate, mode=mode)
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
if __name__ == "__main__":
|
| 242 |
+
## make logger file
|
| 243 |
+
from torch.utils.data import DataLoader
|
| 244 |
+
from option import args
|
| 245 |
+
import time
|
| 246 |
+
from frequency_diffusion.degradation.k_degradation import get_ksu_kernel, apply_ksu_kernel, apply_tofre, \
|
| 247 |
+
apply_to_spatial
|
| 248 |
+
|
| 249 |
+
batch_size = 1
|
| 250 |
+
db_train = build_dataset(args, mode='train')
|
| 251 |
+
|
| 252 |
+
trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
|
| 253 |
+
|
| 254 |
+
for i_batch, sampled_batch in enumerate(trainloader):
|
| 255 |
+
time2 = time.time()
|
| 256 |
+
# print("time for data loading:", time2 - time1)
|
| 257 |
+
|
| 258 |
+
pd, pdfs, _ = sampled_batch
|
| 259 |
+
target = pdfs[1]
|
| 260 |
+
|
| 261 |
+
mean = pdfs[2]
|
| 262 |
+
std = pdfs[3]
|
| 263 |
+
|
| 264 |
+
pd_img = pd[1].unsqueeze(1)
|
| 265 |
+
pdfs_img = pdfs[0].unsqueeze(1)
|
| 266 |
+
target = target.unsqueeze(1)
|
| 267 |
+
|
| 268 |
+
b = pd_img.size(0)
|
| 269 |
+
|
| 270 |
+
pd_img = pd_img # [4, 1, 320, 320]
|
| 271 |
+
pdfs_img = pdfs_img # [4, 1, 320, 320]
|
| 272 |
+
target = target # [4, 1, 320, 320]
|
| 273 |
+
|
| 274 |
+
# ----------- Degradation -------------
|
| 275 |
+
num_timesteps = 1
|
| 276 |
+
image_size = 320
|
| 277 |
+
|
| 278 |
+
# Output a list of k-space kernels
|
| 279 |
+
kspace_masks = get_ksu_kernel(num_timesteps, image_size,
|
| 280 |
+
ksu_routine="LogSamplingRate",
|
| 281 |
+
accelerated_factor=args.ACCELERATIONS[0],
|
| 282 |
+
) # args.ACCELERATIONS = [4] or [8]
|
| 283 |
+
kspace_masks = torch.from_numpy(np.asarray(kspace_masks)).cuda()
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
t = torch.randint(0, num_timesteps, (b,)).long()
|
| 288 |
+
mask = kspace_masks[t]
|
| 289 |
+
fft, mask = apply_tofre(target.clone(), mask)
|
| 290 |
+
# fft = fft * mask + 0.0
|
| 291 |
+
pdfs_img = apply_to_spatial(fft)
|
| 292 |
+
pdfs_img_mask = apply_to_spatial(mask * fft)[0]
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
print("mask = ", mask.shape, mask.min(), mask.max())
|
| 298 |
+
print("pdfs_img_mask =", pdfs_img_mask.shape)
|
| 299 |
+
|
| 300 |
+
import matplotlib.pyplot as plt
|
| 301 |
+
|
| 302 |
+
# combine them together
|
| 303 |
+
pd_img = pd_img.squeeze(1).cpu().numpy()
|
| 304 |
+
pdfs_img = pdfs_img.squeeze(1).cpu().numpy()
|
| 305 |
+
target = target.squeeze(1).cpu().numpy()
|
| 306 |
+
|
| 307 |
+
plt.figure()
|
| 308 |
+
|
| 309 |
+
plt.subplot(161)
|
| 310 |
+
plt.imshow(pd_img[0], cmap='gray')
|
| 311 |
+
plt.title('PD')
|
| 312 |
+
plt.axis('off')
|
| 313 |
+
plt.subplot(162)
|
| 314 |
+
|
| 315 |
+
plt.imshow(pdfs_img_mask[0], cmap='gray')
|
| 316 |
+
plt.title('PDFS_mask')
|
| 317 |
+
plt.axis('off')
|
| 318 |
+
|
| 319 |
+
plt.subplot(163)
|
| 320 |
+
plt.imshow(pdfs_img[0], cmap='gray')
|
| 321 |
+
plt.title('PDFS')
|
| 322 |
+
plt.axis('off')
|
| 323 |
+
|
| 324 |
+
plt.subplot(164)
|
| 325 |
+
plt.imshow(pdfs_img_mask[0] - target[0], cmap='gray')
|
| 326 |
+
plt.title('Diff')
|
| 327 |
+
plt.axis('off')
|
| 328 |
+
|
| 329 |
+
plt.subplot(165)
|
| 330 |
+
plt.imshow(target[0], cmap='gray')
|
| 331 |
+
plt.title('Target')
|
| 332 |
+
plt.axis('off')
|
| 333 |
+
|
| 334 |
+
plt.subplot(166)
|
| 335 |
+
plt.imshow(pdfs_img[0] - target[0], cmap='gray')#mask[0][0], cmap='gray')
|
| 336 |
+
plt.title('Target')
|
| 337 |
+
plt.axis('off')
|
| 338 |
+
|
| 339 |
+
plt.show()
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/hybrid_sparse.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import print_function, division
|
| 2 |
+
import numpy as np
|
| 3 |
+
from glob import glob
|
| 4 |
+
import random
|
| 5 |
+
from skimage import transform
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch.utils.data import Dataset
|
| 9 |
+
|
| 10 |
+
class Hybrid(Dataset):
|
| 11 |
+
|
| 12 |
+
def __init__(self, base_dir=None, split='train', transform=None):
|
| 13 |
+
|
| 14 |
+
super().__init__()
|
| 15 |
+
self._base_dir = base_dir
|
| 16 |
+
self.im_ids = []
|
| 17 |
+
self.images = []
|
| 18 |
+
self.gts = []
|
| 19 |
+
|
| 20 |
+
if split=='train':
|
| 21 |
+
self._image_dir = self._base_dir
|
| 22 |
+
imagelist = glob(self._image_dir+"/*_ct.png")
|
| 23 |
+
imagelist=sorted(imagelist)
|
| 24 |
+
for image_path in imagelist:
|
| 25 |
+
gt_path = image_path.replace('ct', 't1')
|
| 26 |
+
self.images.append(image_path)
|
| 27 |
+
self.gts.append(gt_path)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
elif split=='test':
|
| 31 |
+
self._image_dir = self._base_dir
|
| 32 |
+
imagelist = glob(self._image_dir + "/*_ct.png")
|
| 33 |
+
imagelist=sorted(imagelist)
|
| 34 |
+
for image_path in imagelist:
|
| 35 |
+
gt_path = image_path.replace('ct', 't1')
|
| 36 |
+
self.images.append(image_path)
|
| 37 |
+
self.gts.append(gt_path)
|
| 38 |
+
|
| 39 |
+
self.transform = transform
|
| 40 |
+
|
| 41 |
+
assert (len(self.images) == len(self.gts))
|
| 42 |
+
|
| 43 |
+
# Display stats
|
| 44 |
+
print('Number of images in {}: {:d}'.format(split, len(self.images)))
|
| 45 |
+
|
| 46 |
+
def __len__(self):
|
| 47 |
+
return len(self.images)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def __getitem__(self, index):
|
| 51 |
+
img_in, img, target_in, target= self._make_img_gt_point_pair(index)
|
| 52 |
+
sample = {'image_in': img_in, 'image':img, 'target_in': target_in, 'target': target}
|
| 53 |
+
# print("image in:", img_in.shape)
|
| 54 |
+
|
| 55 |
+
if self.transform is not None:
|
| 56 |
+
sample = self.transform(sample)
|
| 57 |
+
|
| 58 |
+
return sample
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _make_img_gt_point_pair(self, index):
|
| 62 |
+
# Read Image and Target
|
| 63 |
+
|
| 64 |
+
# the default setting (i.e., rawdata.npz) is 4X64P
|
| 65 |
+
dd = np.load(self.images[index].replace('.png', '_raw_4X64P.npz'))
|
| 66 |
+
# print("images range:", dd['fbp'].max(), dd['ct'].max(), dd['under_t1'].max(), dd['t1'].max())
|
| 67 |
+
_img_in = dd['fbp']
|
| 68 |
+
_img_in[_img_in>0.6]=0.6
|
| 69 |
+
_img_in = _img_in/0.6
|
| 70 |
+
|
| 71 |
+
_img = dd['ct']
|
| 72 |
+
_img =(_img/1000*0.192+0.192)
|
| 73 |
+
_img[_img<0.0]=0.0
|
| 74 |
+
_img[_img>0.6]=0.6
|
| 75 |
+
_img = _img/0.6
|
| 76 |
+
|
| 77 |
+
_target_in = dd['under_t1']
|
| 78 |
+
_target = dd['t1']
|
| 79 |
+
|
| 80 |
+
return _img_in, _img, _target_in, _target
|
| 81 |
+
|
| 82 |
+
class RandomPadCrop(object):
|
| 83 |
+
def __call__(self, sample):
|
| 84 |
+
new_w, new_h = 400, 400
|
| 85 |
+
crop_size = 384
|
| 86 |
+
pad_size = (400-384)//2
|
| 87 |
+
img_in = sample['image_in']
|
| 88 |
+
img = sample['image']
|
| 89 |
+
target_in = sample['target_in']
|
| 90 |
+
target = sample['target']
|
| 91 |
+
|
| 92 |
+
img_in = np.pad(img_in, pad_size, mode='reflect')
|
| 93 |
+
img = np.pad(img, pad_size, mode='reflect')
|
| 94 |
+
target_in = np.pad(target_in, pad_size, mode='reflect')
|
| 95 |
+
target = np.pad(target, pad_size, mode='reflect')
|
| 96 |
+
|
| 97 |
+
ww = random.randint(0, np.maximum(0, new_w - crop_size))
|
| 98 |
+
hh = random.randint(0, np.maximum(0, new_h - crop_size))
|
| 99 |
+
|
| 100 |
+
img_in = img_in[ww:ww+crop_size, hh:hh+crop_size]
|
| 101 |
+
img = img[ww:ww+crop_size, hh:hh+crop_size]
|
| 102 |
+
target_in = target_in[ww:ww+crop_size, hh:hh+crop_size]
|
| 103 |
+
target = target[ww:ww+crop_size, hh:hh+crop_size]
|
| 104 |
+
|
| 105 |
+
sample = {'image_in': img_in, 'image': img, 'target_in': target_in, 'target': target}
|
| 106 |
+
return sample
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
class RandomResizeCrop(object):
|
| 110 |
+
"""Convert ndarrays in sample to Tensors."""
|
| 111 |
+
|
| 112 |
+
def __call__(self, sample):
|
| 113 |
+
new_w, new_h = 270, 270
|
| 114 |
+
crop_size = 256
|
| 115 |
+
img_in = sample['image_in']
|
| 116 |
+
img = sample['image']
|
| 117 |
+
target_in = sample['target_in']
|
| 118 |
+
target = sample['target']
|
| 119 |
+
|
| 120 |
+
img_in = transform.resize(img_in, (new_h, new_w), order=3)
|
| 121 |
+
img = transform.resize(img, (new_h, new_w), order=3)
|
| 122 |
+
target_in = transform.resize(target_in, (new_h, new_w), order=3)
|
| 123 |
+
target = transform.resize(target, (new_h, new_w), order=3)
|
| 124 |
+
|
| 125 |
+
ww = random.randint(0, np.maximum(0, new_w - crop_size))
|
| 126 |
+
hh = random.randint(0, np.maximum(0, new_h - crop_size))
|
| 127 |
+
|
| 128 |
+
img_in = img_in[ww:ww+crop_size, hh:hh+crop_size]
|
| 129 |
+
img = img[ww:ww+crop_size, hh:hh+crop_size]
|
| 130 |
+
target_in = target_in[ww:ww+crop_size, hh:hh+crop_size]
|
| 131 |
+
target = target[ww:ww+crop_size, hh:hh+crop_size]
|
| 132 |
+
|
| 133 |
+
sample = {'image_in': img_in, 'image': img, 'target_in': target_in, 'target': target}
|
| 134 |
+
return sample
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class ToTensor(object):
|
| 138 |
+
"""Convert ndarrays in sample to Tensors."""
|
| 139 |
+
|
| 140 |
+
def __call__(self, sample):
|
| 141 |
+
# swap color axis because
|
| 142 |
+
# numpy image: H x W x C
|
| 143 |
+
# torch image: C X H X W
|
| 144 |
+
img_in = sample['image_in'][:, :, None].transpose((2, 0, 1))
|
| 145 |
+
img = sample['image'][:, :, None].transpose((2, 0, 1))
|
| 146 |
+
target_in = sample['target_in'][:, :, None].transpose((2, 0, 1))
|
| 147 |
+
target = sample['target'][:, :, None].transpose((2, 0, 1))
|
| 148 |
+
img_in = torch.from_numpy(img_in).float()
|
| 149 |
+
img = torch.from_numpy(img).float()
|
| 150 |
+
target_in = torch.from_numpy(target_in).float()
|
| 151 |
+
target = torch.from_numpy(target).float()
|
| 152 |
+
|
| 153 |
+
return {'ct_in': img_in,
|
| 154 |
+
'ct': img,
|
| 155 |
+
'mri_in': target_in,
|
| 156 |
+
'mri': target}
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/kspace_subsample.py
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
2023/10/16,
|
| 3 |
+
preprocess kspace data with the undersampling mask in the fastMRI project.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import contextlib
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@contextlib.contextmanager
|
| 13 |
+
def temp_seed(rng, seed):
|
| 14 |
+
state = rng.get_state()
|
| 15 |
+
rng.seed(seed)
|
| 16 |
+
try:
|
| 17 |
+
yield
|
| 18 |
+
finally:
|
| 19 |
+
rng.set_state(state)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def create_mask_for_mask_type(mask_type_str, center_fractions, accelerations):
|
| 23 |
+
if mask_type_str == "random":
|
| 24 |
+
return RandomMaskFunc(center_fractions, accelerations)
|
| 25 |
+
elif mask_type_str == "equispaced":
|
| 26 |
+
return EquispacedMaskFunc(center_fractions, accelerations)
|
| 27 |
+
else:
|
| 28 |
+
raise Exception(f"{mask_type_str} not supported")
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
## mri related
|
| 32 |
+
def mri_fourier_transform_2d(image, mask):
|
| 33 |
+
'''
|
| 34 |
+
image: input tensor [B, H, W, C]
|
| 35 |
+
mask: mask tensor [H, W]
|
| 36 |
+
'''
|
| 37 |
+
spectrum = torch.fft.fftn(image, dim=(1, 2), norm='ortho')
|
| 38 |
+
# K-space spectrum has been shifted to shift the zero-frequency component to the center of the spectrum
|
| 39 |
+
spectrum = torch.fft.fftshift(spectrum, dim=(1, 2))
|
| 40 |
+
# Downsample k-space
|
| 41 |
+
masked_spectrum = spectrum * mask[None, :, :, None]
|
| 42 |
+
return spectrum, masked_spectrum
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
## mri related
|
| 46 |
+
def mri_inver_fourier_transform_2d(spectrum):
|
| 47 |
+
'''
|
| 48 |
+
image: input tensor [B, H, W, C]
|
| 49 |
+
'''
|
| 50 |
+
spectrum = torch.fft.ifftshift(spectrum, dim=(1, 2))
|
| 51 |
+
image = torch.fft.ifftn(spectrum, dim=(1, 2), norm='ortho')
|
| 52 |
+
|
| 53 |
+
return image
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def add_gaussian_noise(kspace, snr):
|
| 57 |
+
### 根据SNR确定noise的放大比例
|
| 58 |
+
num_pixels = kspace.shape[0]*kspace.shape[1]*kspace.shape[2]*kspace.shape[3]
|
| 59 |
+
psr = torch.sum(torch.abs(kspace.real)**2)/num_pixels
|
| 60 |
+
pnr = psr/(np.power(10, snr/10))
|
| 61 |
+
noise_r = torch.randn_like(kspace.real)*np.sqrt(pnr)
|
| 62 |
+
|
| 63 |
+
psim = torch.sum(torch.abs(kspace.imag)**2)/num_pixels
|
| 64 |
+
pnim = psim/(np.power(10, snr/10))
|
| 65 |
+
noise_im = torch.randn_like(kspace.imag)*np.sqrt(pnim)
|
| 66 |
+
|
| 67 |
+
noise = noise_r + 1j*noise_im
|
| 68 |
+
noisy_kspace = kspace + noise
|
| 69 |
+
|
| 70 |
+
return noisy_kspace
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def mri_fft(raw_mri, _SNR):
|
| 74 |
+
mri = torch.tensor(raw_mri)[None, :, :, None].to(torch.float32)
|
| 75 |
+
spectrum = torch.fft.fftn(mri, dim=(1, 2), norm='ortho')
|
| 76 |
+
# K-space spectrum has been shifted to shift the zero-frequency component to the center of the spectrum
|
| 77 |
+
kspace = torch.fft.fftshift(spectrum, dim=(1, 2))
|
| 78 |
+
|
| 79 |
+
if _SNR > 0:
|
| 80 |
+
noisy_kspace = add_gaussian_noise(kspace, _SNR)
|
| 81 |
+
else:
|
| 82 |
+
noisy_kspace = kspace
|
| 83 |
+
|
| 84 |
+
noisy_mri = mri_inver_fourier_transform_2d(noisy_kspace)
|
| 85 |
+
noisy_mri = torch.sqrt(torch.real(noisy_mri)**2 + torch.imag(noisy_mri)**2)
|
| 86 |
+
|
| 87 |
+
return noisy_kspace[0].permute(2, 0, 1), noisy_mri[0].permute(2, 0, 1), \
|
| 88 |
+
kspace[0].permute(2, 0, 1), mri[0].permute(2, 0, 1)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
from dataloaders.math import complex_abs, complex_abs_numpy, complex_abs_sq
|
| 92 |
+
|
| 93 |
+
def mri_fft_m4raw(lq_mri, hq_mri):
|
| 94 |
+
# breakpoint()
|
| 95 |
+
lq_mri = torch.tensor(lq_mri[0])[None, :, :, None].to(torch.float32)
|
| 96 |
+
lq_mri_spectrum = torch.fft.fftn(lq_mri, dim=(1, 2), norm='ortho')
|
| 97 |
+
lq_mri_spectrum = torch.fft.fftshift(lq_mri_spectrum, dim=(1, 2))
|
| 98 |
+
|
| 99 |
+
# Complex
|
| 100 |
+
lq_mri = mri_inver_fourier_transform_2d(lq_mri_spectrum[0])
|
| 101 |
+
# print("lq_mri shape:", lq_mri.shape)
|
| 102 |
+
lq_mri = torch.cat([torch.real(lq_mri), torch.imag(lq_mri)], dim=-1)
|
| 103 |
+
lq_mri = complex_abs(lq_mri)
|
| 104 |
+
lq_mri = torch.abs(lq_mri)
|
| 105 |
+
# print("lq_mri after shape:", lq_mri.shape)
|
| 106 |
+
lq_mri = lq_mri.unsqueeze(-1)
|
| 107 |
+
#
|
| 108 |
+
lq_kspace = torch.cat([torch.real(lq_mri_spectrum), torch.imag(lq_mri_spectrum)], dim=-1)
|
| 109 |
+
lq_kspace = torch.abs(complex_abs(lq_kspace[0]))
|
| 110 |
+
lq_kspace = lq_kspace.unsqueeze(-1)
|
| 111 |
+
|
| 112 |
+
hq_mri = torch.tensor(hq_mri[0])[None, :, :, None].to(torch.float32)
|
| 113 |
+
hq_mri_spectrum = torch.fft.fftn(hq_mri, dim=(1, 2), norm='ortho')
|
| 114 |
+
hq_mri_spectrum = torch.fft.fftshift(hq_mri_spectrum, dim=(1, 2))
|
| 115 |
+
|
| 116 |
+
hq_mri = mri_inver_fourier_transform_2d(hq_mri_spectrum[0])
|
| 117 |
+
hq_mri = torch.cat([torch.real(hq_mri), torch.imag(hq_mri)], dim=-1)
|
| 118 |
+
|
| 119 |
+
hq_mri = complex_abs(hq_mri) # Convert the complex number to the absolute value.
|
| 120 |
+
hq_mri = torch.abs(hq_mri)
|
| 121 |
+
hq_mri = hq_mri.unsqueeze(-1)
|
| 122 |
+
#
|
| 123 |
+
hq_kspace = torch.cat([torch.real(hq_mri_spectrum), torch.imag(hq_mri_spectrum)], dim=-1)
|
| 124 |
+
|
| 125 |
+
hq_kspace = torch.abs(complex_abs(hq_kspace[0]))
|
| 126 |
+
hq_kspace = hq_kspace.unsqueeze(-1)
|
| 127 |
+
|
| 128 |
+
# breakpoint()
|
| 129 |
+
return lq_kspace, lq_mri.permute(2, 0, 1), \
|
| 130 |
+
hq_kspace, hq_mri.permute(2, 0, 1)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def undersample_mri(raw_mri, _MRIDOWN, _SNR):
|
| 134 |
+
mri = torch.tensor(raw_mri)[None, :, :, None].to(torch.float32)
|
| 135 |
+
if _MRIDOWN == "4X":
|
| 136 |
+
mask_type_str, center_fraction, MRIDOWN = "random", 0.1, 4
|
| 137 |
+
elif _MRIDOWN == "8X":
|
| 138 |
+
mask_type_str, center_fraction, MRIDOWN = "equispaced", 0.04, 8
|
| 139 |
+
|
| 140 |
+
ff = create_mask_for_mask_type(mask_type_str, [center_fraction], [MRIDOWN]) ## 0.2 for MRIDOWN=2, 0.1 for MRIDOWN=4, 0.04 for MRIDOWN=8
|
| 141 |
+
|
| 142 |
+
shape = [240, 240, 1]
|
| 143 |
+
mask = ff(shape, seed=1337)
|
| 144 |
+
mask = mask[:, :, 0] # [1, 240]
|
| 145 |
+
# print("mask:", mask.shape)
|
| 146 |
+
# print("original MRI:", mri)
|
| 147 |
+
|
| 148 |
+
# print("original MRI:", mri.shape)
|
| 149 |
+
### under-sample the kspace data.
|
| 150 |
+
kspace, masked_kspace = mri_fourier_transform_2d(mri, mask)
|
| 151 |
+
### add low-field noise to the kspace data.
|
| 152 |
+
if _SNR > 0:
|
| 153 |
+
noisy_kspace = add_gaussian_noise(masked_kspace, _SNR)
|
| 154 |
+
else:
|
| 155 |
+
noisy_kspace = masked_kspace
|
| 156 |
+
|
| 157 |
+
### conver the corrupted kspace data back to noisy MRI image.
|
| 158 |
+
noisy_mri = mri_inver_fourier_transform_2d(noisy_kspace)
|
| 159 |
+
noisy_mri = torch.sqrt(torch.real(noisy_mri)**2 + torch.imag(noisy_mri)**2)
|
| 160 |
+
|
| 161 |
+
return noisy_kspace[0].permute(2, 0, 1), noisy_mri[0].permute(2, 0, 1), \
|
| 162 |
+
kspace[0].permute(2, 0, 1), mri[0].permute(2, 0, 1), mask.unsqueeze(-1)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class MaskFunc(object):
    """Base class for GRAPPA-style k-space sampling masks.

    Creates sampling masks that densely sample the k-space center while
    subsampling the outer regions according to an acceleration factor.
    """

    def __init__(self, center_fractions, accelerations):
        """
        Args:
            center_fractions (List[float]): Fractions of low-frequency columns
                to retain. When several values are given, one is picked
                uniformly at random per call.
            accelerations (List[int]): Undersampling factors; must be the same
                length as ``center_fractions``.

        Raises:
            ValueError: if the two lists differ in length.
        """
        if len(center_fractions) != len(accelerations):
            raise ValueError(
                "Number of center fractions should match number of accelerations"
            )

        self.center_fractions = center_fractions
        self.accelerations = accelerations
        # Module-level RNG; callers may reseed it temporarily for determinism.
        self.rng = np.random

    def choose_acceleration(self):
        """Uniformly pick one (center_fraction, acceleration) pair."""
        idx = self.rng.randint(0, len(self.accelerations))
        return self.center_fractions[idx], self.accelerations[idx]
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
class RandomMaskFunc(MaskFunc):
    """Random column-sampling mask (fastMRI style).

    For k-space data with N columns the mask keeps:
      1. N_low_freqs = round(N * center_fraction) central (low-frequency)
         columns, always sampled.
      2. Every other column independently with probability
         prob = (N / acceleration - N_low_freqs) / (N - N_low_freqs),
         so the expected number of sampled columns is N / acceleration.

    When several (center_fraction, acceleration) pairs are configured, one
    pair is drawn uniformly at random on each call.
    """

    def __call__(self, shape, seed=None):
        """Build a random under-sampling mask.

        Args:
            shape (iterable[int]): Target mask shape; needs >= 3 dims.
                Columns are drawn along the second-to-last dimension.
            seed (int, optional): Seed applied temporarily so the same shape
                yields the same mask; the RNG state is restored afterwards.

        Returns:
            torch.Tensor: float32 mask broadcastable to ``shape``.
        """
        if len(shape) < 3:
            raise ValueError("Shape should have 3 or more dimensions")

        with temp_seed(self.rng, seed):
            num_cols = shape[-2]
            center_fraction, acceleration = self.choose_acceleration()

            num_low_freqs = int(round(num_cols * center_fraction))
            # Probability for the outer columns chosen so the expected total
            # number of sampled columns equals num_cols / acceleration.
            prob = (num_cols / acceleration - num_low_freqs) / (
                num_cols - num_low_freqs
            )
            mask = self.rng.uniform(size=num_cols) < prob
            # Force the centered low-frequency band on.
            pad = (num_cols - num_low_freqs + 1) // 2
            mask[pad : pad + num_low_freqs] = True

            # Broadcast-friendly reshape: 1 everywhere except the column dim.
            mask_shape = [1] * len(shape)
            mask_shape[-2] = num_cols
            mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))

        return mask
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
class EquispacedMaskFunc(MaskFunc):
    """
    EquispacedMaskFunc creates a sub-sampling mask of a given shape.

    The mask selects a subset of columns from the input k-space data. If the
    k-space data has N columns, the mask picks out:
        1. N_low_freqs = (N * center_fraction) columns in the center
           corresponding to low-frequencies.
        2. The other columns are selected with equal spacing at a proportion
           that reaches the desired acceleration rate taking into consideration
           the number of low frequencies. This ensures that the expected number
           of columns selected is equal to (N / acceleration)

    It is possible to use multiple center_fractions and accelerations, in which
    case one possible (center_fraction, acceleration) is chosen uniformly at
    random each time the EquispacedMaskFunc object is called.

    Note that this function may not give equispaced samples (documented in
    https://github.com/facebookresearch/fastMRI/issues/54), which will require
    modifications to standard GRAPPA approaches. Nonetheless, this aspect of
    the function has been preserved to match the public multicoil data.
    """

    def __call__(self, shape, seed):
        """
        Args:
            shape (iterable[int]): The shape of the mask to be created. The
                shape should have at least 3 dimensions. Samples are drawn
                along the second last dimension.
            seed (int, optional): Seed for the random number generator. Setting
                the seed ensures the same mask is generated each time for the
                same shape. The random state is reset afterwards.

        Returns:
            torch.Tensor: A mask of the specified shape.
        """
        if len(shape) < 3:
            raise ValueError("Shape should have 3 or more dimensions")

        # temp_seed (defined elsewhere in this module) makes the draw
        # reproducible for a fixed seed and restores the RNG state afterwards.
        with temp_seed(self.rng, seed):
            center_fraction, acceleration = self.choose_acceleration()
            num_cols = shape[-2]
            num_low_freqs = int(round(num_cols * center_fraction))

            # create the mask: start all-off, turn on the centered
            # low-frequency band.
            mask = np.zeros(num_cols, dtype=np.float32)
            pad = (num_cols - num_low_freqs + 1) // 2
            mask[pad : pad + num_low_freqs] = True

            # determine acceleration rate by adjusting for the number of low
            # frequencies already sampled, so the overall sampled count still
            # averages num_cols / acceleration.
            # NOTE(review): divides by (num_low_freqs * acceleration - num_cols);
            # presumably never zero for the configured fractions — verify.
            adjusted_accel = (acceleration * (num_low_freqs - num_cols)) / (
                num_low_freqs * acceleration - num_cols
            )
            # Random phase offset for the equispaced comb of samples.
            offset = self.rng.randint(0, round(adjusted_accel))

            accel_samples = np.arange(offset, num_cols - 1, adjusted_accel)
            accel_samples = np.around(accel_samples).astype(np.uint)
            mask[accel_samples] = True

            # reshape the mask so it broadcasts over all non-column dims
            mask_shape = [1 for _ in shape]
            mask_shape[-2] = num_cols
            mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))

        return mask
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/m4_utils.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Copyright (c) Facebook, Inc. and its affiliates.
|
| 3 |
+
|
| 4 |
+
This source code is licensed under the MIT license found in the
|
| 5 |
+
LICENSE file in the root directory of this source tree.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
def complex_mul(x, y):
    """Multiply two complex tensors stored as real pairs.

    Both inputs hold the real and imaginary parts along their last dimension
    (size 2); the product is returned in the same layout.

    Args:
        x (torch.Tensor): Tensor whose last dimension has size 2.
        y (torch.Tensor): Tensor whose last dimension has size 2.

    Returns:
        torch.Tensor: Element-wise complex product, last dimension of size 2.
    """
    assert x.shape[-1] == y.shape[-1] == 2
    x_re, x_im = x[..., 0], x[..., 1]
    y_re, y_im = y[..., 0], y[..., 1]
    real_part = x_re * y_re - x_im * y_im
    imag_part = x_re * y_im + x_im * y_re
    return torch.stack((real_part, imag_part), dim=-1)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def complex_conj(x):
    """Complex conjugate of a real-pair complex tensor.

    Negates the imaginary component, assuming the last dimension (size 2)
    holds (real, imag).

    Args:
        x (torch.Tensor): Tensor whose last dimension has size 2.

    Returns:
        torch.Tensor: Conjugated tensor, last dimension of size 2.
    """
    assert x.shape[-1] == 2
    real_part = x[..., 0]
    imag_part = x[..., 1]
    return torch.stack((real_part, -imag_part), dim=-1)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# def fft2c(data):
|
| 52 |
+
# """
|
| 53 |
+
# Apply centered 2 dimensional Fast Fourier Transform.
|
| 54 |
+
|
| 55 |
+
# Args:
|
| 56 |
+
# data (torch.Tensor): Complex valued input data containing at least 3
|
| 57 |
+
# dimensions: dimensions -3 & -2 are spatial dimensions and dimension
|
| 58 |
+
# -1 has size 2. All other dimensions are assumed to be batch
|
| 59 |
+
# dimensions.
|
| 60 |
+
|
| 61 |
+
# Returns:
|
| 62 |
+
# torch.Tensor: The FFT of the input.
|
| 63 |
+
# """
|
| 64 |
+
# assert data.size(-1) == 2
|
| 65 |
+
# data = ifftshift(data, dim=(-3, -2))
|
| 66 |
+
# data = torch.fft(data, 2, normalized=True)
|
| 67 |
+
# data = fftshift(data, dim=(-3, -2))
|
| 68 |
+
|
| 69 |
+
# return data
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# def ifft2c(data):
|
| 73 |
+
# """
|
| 74 |
+
# Apply centered 2-dimensional Inverse Fast Fourier Transform.
|
| 75 |
+
|
| 76 |
+
# Args:
|
| 77 |
+
# data (torch.Tensor): Complex valued input data containing at least 3
|
| 78 |
+
# dimensions: dimensions -3 & -2 are spatial dimensions and dimension
|
| 79 |
+
# -1 has size 2. All other dimensions are assumed to be batch
|
| 80 |
+
# dimensions.
|
| 81 |
+
|
| 82 |
+
# Returns:
|
| 83 |
+
# torch.Tensor: The IFFT of the input.
|
| 84 |
+
# """
|
| 85 |
+
# assert data.size(-1) == 2
|
| 86 |
+
# data = ifftshift(data, dim=(-3, -2))
|
| 87 |
+
# # data = torch.ifft(data, 2, normalized=True)
|
| 88 |
+
# data = torch.fft.ifft(data, 2, normalized=True)
|
| 89 |
+
# data = fftshift(data, dim=(-3, -2))
|
| 90 |
+
|
| 91 |
+
# return data
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def fft2c(data: torch.Tensor, norm: str = "ortho") -> torch.Tensor:
    """
    Apply centered 2 dimensional Fast Fourier Transform.

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.
        norm: Normalization mode. See ``torch.fft.fft``.

    Returns:
        The FFT of the input.

    Raises:
        ValueError: if the last dimension does not have size 2.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")

    # Use torch.fft's native shift helpers instead of the hand-rolled
    # roll-based fftshift/ifftshift; semantics are identical.
    # .contiguous() because view_as_complex requires a dense last dim.
    complex_data = torch.view_as_complex(data.contiguous())
    complex_data = torch.fft.ifftshift(complex_data, dim=(-2, -1))
    complex_data = torch.fft.fftn(complex_data, dim=(-2, -1), norm=norm)
    complex_data = torch.fft.fftshift(complex_data, dim=(-2, -1))
    return torch.view_as_real(complex_data)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def ifft2c(data: torch.Tensor, norm: str = "ortho") -> torch.Tensor:
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform.

    Args:
        data: Complex valued input data containing at least 3 dimensions:
            dimensions -3 & -2 are spatial dimensions and dimension -1 has size
            2. All other dimensions are assumed to be batch dimensions.
        norm: Normalization mode. See ``torch.fft.ifft``.

    Returns:
        The IFFT of the input.

    Raises:
        ValueError: if the last dimension does not have size 2.
    """
    if not data.shape[-1] == 2:
        raise ValueError("Tensor does not have separate complex dim.")

    # Use torch.fft's native shift helpers instead of the hand-rolled
    # roll-based fftshift/ifftshift; semantics are identical.
    # .contiguous() because view_as_complex requires a dense last dim.
    complex_data = torch.view_as_complex(data.contiguous())
    complex_data = torch.fft.ifftshift(complex_data, dim=(-2, -1))
    complex_data = torch.fft.ifftn(complex_data, dim=(-2, -1), norm=norm)
    complex_data = torch.fft.fftshift(complex_data, dim=(-2, -1))
    return torch.view_as_real(complex_data)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def complex_abs(data):
    """Magnitude of a real-pair complex tensor.

    Args:
        data (torch.Tensor): Tensor whose final dimension (size 2) holds the
            real and imaginary parts.

    Returns:
        torch.Tensor: Element-wise absolute value; the complex dimension is
        reduced away.
    """
    assert data.size(-1) == 2
    return data.pow(2).sum(dim=-1).sqrt()
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def complex_abs_numpy(data):
    """NumPy counterpart of ``complex_abs``: magnitude of a real-pair array."""
    assert data.shape[-1] == 2
    return np.sqrt((data ** 2).sum(axis=-1))
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def complex_abs_sq(data):
    """Squared magnitude of a real-pair complex tensor (used for multi-coil).

    Args:
        data (torch.Tensor): Tensor whose final dimension (size 2) holds the
            real and imaginary parts.

    Returns:
        torch.Tensor: Element-wise squared absolute value.
    """
    assert data.size(-1) == 2
    return data.pow(2).sum(dim=-1)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
# Helper functions
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def roll(x, shift, dim):
    """
    Similar to np.roll but applies to PyTorch Tensors.

    Delegates to ``torch.roll`` (the original hand-rolled narrow/cat version
    predates it); behavior is unchanged, including multi-dim shift lists.

    Args:
        x (torch.Tensor): A PyTorch tensor.
        shift (int or sequence[int]): Amount(s) to roll.
        dim (int or sequence[int]): Dimension(s) to roll; must match the
            length of ``shift`` when sequences are given.

    Returns:
        torch.Tensor: Rolled version of x.
    """
    if isinstance(shift, (tuple, list)):
        assert len(shift) == len(dim)
        # torch.roll wants tuples, not lists.
        return torch.roll(x, tuple(shift), tuple(dim))
    return torch.roll(x, shift, dim)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def fftshift(x, dim=None):
    """
    Similar to np.fft.fftshift but applies to PyTorch Tensors.

    Args:
        x (torch.Tensor): A PyTorch tensor.
        dim (int or sequence[int], optional): Dimension(s) to shift; all
            dimensions when omitted.

    Returns:
        torch.Tensor: fftshifted version of x.
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        # Original comprehension shadowed `dim` with the loop variable;
        # use a distinct name.
        shift = tuple(n // 2 for n in x.shape)
    elif isinstance(dim, int):
        shift = x.shape[dim] // 2
    else:
        dim = tuple(dim)
        shift = tuple(x.shape[i] // 2 for i in dim)

    # torch.roll replaces the hand-rolled roll() helper; identical semantics.
    return torch.roll(x, shift, dim)
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def ifftshift(x, dim=None):
    """
    Similar to np.fft.ifftshift but applies to PyTorch Tensors.

    Args:
        x (torch.Tensor): A PyTorch tensor.
        dim (int or sequence[int], optional): Dimension(s) to shift; all
            dimensions when omitted.

    Returns:
        torch.Tensor: ifftshifted version of x.
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        # Original comprehension shadowed `dim` with the loop variable;
        # use a distinct name.
        shift = tuple((n + 1) // 2 for n in x.shape)
    elif isinstance(dim, int):
        shift = (x.shape[dim] + 1) // 2
    else:
        dim = tuple(dim)
        shift = tuple((x.shape[i] + 1) // 2 for i in dim)

    # torch.roll replaces the hand-rolled roll() helper; identical semantics.
    return torch.roll(x, shift, dim)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def tensor_to_complex_np(data):
    """Convert a real-pair torch tensor to a complex numpy array.

    Args:
        data (torch.Tensor): Tensor whose last dimension (size 2) holds the
            real and imaginary parts.

    Returns:
        np.array: Complex numpy version of data.
    """
    arr = data.numpy()
    return arr[..., 0] + 1j * arr[..., 1]
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/m4raw_dataloader.py
ADDED
|
@@ -0,0 +1,574 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from __future__ import print_function, division
|
| 3 |
+
from typing import Dict, NamedTuple, Optional, Sequence, Tuple, Union
|
| 4 |
+
import sys
|
| 5 |
+
sys.path.append('.')
|
| 6 |
+
from glob import glob
|
| 7 |
+
import os, time
|
| 8 |
+
os.environ['OPENBLAS_NUM_THREADS'] = '1'
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
from torch.utils.data import Dataset
|
| 12 |
+
|
| 13 |
+
import h5py
|
| 14 |
+
from matplotlib import pyplot as plt
|
| 15 |
+
from dataloaders.math import ifft2c, fft2c, complex_abs
|
| 16 |
+
from dataloaders.kspace_subsample import create_mask_for_mask_type
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
from torch.utils.data import DataLoader
|
| 20 |
+
from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
|
| 21 |
+
from dataloaders.kspace_subsample import undersample_mri, mri_fft, mri_fft_m4raw
|
| 22 |
+
from tqdm import tqdm
|
| 23 |
+
|
| 24 |
+
def normalize(data, mean, stddev, eps=0.0):
    """Standardize a tensor with given statistics.

    Computes ``(data - mean) / (stddev + eps)``.

    Args:
        data (torch.Tensor): Input data to be normalized.
        mean (float): Mean value.
        stddev (float): Standard deviation.
        eps (float, default=0.0): Added to stddev to prevent dividing by zero.

    Returns:
        torch.Tensor: Normalized tensor
    """
    centered = data - mean
    return centered / (stddev + eps)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def normalize_instance(data, eps=0.0):
    """Instance-normalize a tensor using its own statistics.

    Applies ``(data - mean) / (stddev + eps)`` with mean and stddev computed
    from ``data`` itself.

    Args:
        data (torch.Tensor): Input data to be normalized
        eps (float): Added to stddev to prevent dividing by zero

    Returns:
        tuple: (normalized tensor, mean, std)
    """
    mean = data.mean()
    std = data.std()
    normalized = (data - mean) / (std + eps)
    return normalized, mean, std
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def normal(x):
    """Min-max normalize each slice along the first axis to [0, 1].

    Each ``x[i]`` is rescaled independently by its own min/max.
    NOTE(review): assumes no slice is constant (max > min), otherwise this
    divides by zero — TODO confirm with callers.
    """
    out = np.zeros_like(x)
    for idx in range(out.shape[0]):
        lo = x[idx].min()
        hi = x[idx].max()
        out[idx] = (x[idx] - lo) / (hi - lo)
    return out
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def undersample_mri(kspace, _MRIDOWN):
    """Apply a column under-sampling mask to multi-coil k-space data.

    Args:
        kspace: k-space tensor; observed shape in callers is
            [slices, coils, 256, 256, 2] (real/imag last) — the mask is
            hard-coded to 256 columns.
        _MRIDOWN: undersampling factor, "4X" (random mask) or "8X"
            (equispaced mask).

    Returns:
        Tuple of (masked_kspace, mask) where mask has shape [1, 256, 1].

    Raises:
        ValueError: if _MRIDOWN is not "4X" or "8X".
    """
    # print("kspace shape:", kspace.shape) ## [18, 4, 256, 256, 2]

    if _MRIDOWN == "4X":
        mask_type_str, center_fraction, MRIDOWN = "random", 0.1, 4
    elif _MRIDOWN == "8X":
        mask_type_str, center_fraction, MRIDOWN = "equispaced", 0.04, 8
    else:
        # Previously fell through with unbound locals (NameError below);
        # fail fast with a clear message instead.
        raise ValueError(f"Unsupported undersampling factor: {_MRIDOWN}")

    ## 0.2 for MRIDOWN=2, 0.1 for MRIDOWN=4, 0.04 for MRIDOWN=8
    ff = create_mask_for_mask_type(mask_type_str, [center_fraction], [MRIDOWN])

    shape = [256, 256, 1]
    mask = ff(shape, seed=1337)  ## [1, 256, 1]; fixed seed -> deterministic
    mask = mask[:, :, 0]  # [1, 256]

    # Broadcast the column mask over slices, coils and the complex dim.
    masked_kspace = kspace * mask[None, None, :, :, None]

    return masked_kspace, mask.unsqueeze(-1)
|
| 89 |
+
|
| 90 |
+
def to_tensor(data):
    """Convert a numpy array to a PyTorch tensor.

    Complex arrays are converted to real tensors with the real and imaginary
    parts stacked along a new last dimension.

    Args:
        data (np.array): Input numpy array.

    Returns:
        torch.Tensor: PyTorch version of data.
    """
    if np.iscomplexobj(data):
        data = np.stack((data.real, data.imag), axis=-1)
    return torch.from_numpy(data)
|
| 107 |
+
|
| 108 |
+
def rss(data, dim=0):
    """Root Sum of Squares reduction.

    Combines values along ``dim`` (conventionally the coil dimension) as
    sqrt(sum(data**2)).

    Args:
        data (torch.Tensor): The input tensor
        dim (int): The dimension along which to apply the RSS transform

    Returns:
        torch.Tensor: The RSS value.
    """
    squared_sum = data.pow(2).sum(dim)
    return squared_sum.sqrt()
|
| 122 |
+
|
| 123 |
+
def read_h5(file_name, _MRIDOWN='None', use_kspace=False):
    """Load a multi-coil k-space volume from HDF5 and build RSS images.

    Args:
        file_name: Path to an .h5 file with a 'kspace' dataset.
        _MRIDOWN: 'None' (no undersampling) or a factor string ('4X'/'8X')
            passed to undersample_mri.
        use_kspace: When True, skip undersampling (the caller handles k-space
            itself) and return the fully-sampled image twice.

    Returns:
        Tuple (slice_image_rss, masked_image_rss) of per-slice min-max
        normalized magnitude images; identical when no undersampling applies.
    """
    # Context manager closes the HDF5 handle even on error; the original
    # left the file open (resource leak) and relied on the default mode.
    with h5py.File(file_name, 'r') as hf:
        volume_kspace = hf['kspace'][()]
    slice_kspace = volume_kspace
    slice_kspace2 = to_tensor(slice_kspace)

    # Fully-sampled reconstruction: inverse FFT -> magnitude -> coil RSS.
    slice_image = ifft2c(slice_kspace2)
    slice_image_abs = complex_abs(slice_image)
    slice_image_rss = rss(slice_image_abs, dim=1)
    slice_image_rss = np.abs(slice_image_rss.numpy())
    slice_image_rss = normal(slice_image_rss)

    if _MRIDOWN == 'None' or use_kspace:
        masked_image_rss = slice_image_rss
    else:
        # Undersample MRI, then reconstruct the degraded image the same way.
        masked_kspace, mask = undersample_mri(slice_kspace2, _MRIDOWN)

        masked_image = ifft2c(masked_kspace)
        masked_image_abs = complex_abs(masked_image)
        masked_image_rss = rss(masked_image_abs, dim=1)
        masked_image_rss = np.abs(masked_image_rss.numpy())
        masked_image_rss = normal(masked_image_rss)

    return slice_image_rss, masked_image_rss
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
DEBUG = True
|
| 153 |
+
|
| 154 |
+
class M4Raw_TrainSet(Dataset):
|
| 155 |
+
def __init__(self, root_path, MRIDOWN, kspace_refine='False', use_kspace=False):
    """Load the M4Raw multicoil training split into memory.

    Args:
        root_path: Dataset root containing a 'multicoil_train' directory.
        MRIDOWN: Undersampling factor string forwarded to read_h5 for T2.
        kspace_refine: String flag ('True'/'False'); when 'True', also load
            precomputed k-space-network reconstructions (.npy files).
        use_kspace: Forwarded to read_h5; skips image-domain undersampling.
    """

    self.use_kspace = use_kspace
    self.kspace_refine = kspace_refine
    start_time = time.time()

    # Three T1 repetitions per subject; T102 files are globbed, T101/T103
    # derived by filename substitution.
    input_list1 = sorted(glob(os.path.join(root_path + '/multicoil_train' + '/*_T102.h5')))
    input_list2 = [path.replace('_T102.h5','_T101.h5') for path in input_list1]
    input_list3 = [path.replace('_T102.h5','_T103.h5') for path in input_list1]
    if DEBUG:
        # Keep only two subjects for fast debugging runs.
        input_list1 = input_list1[:2]
        input_list2 = input_list2[:2]
        input_list3 = input_list3[:2]

    T1_input_list = [input_list1, input_list2, input_list3]

    # Same scheme for the three T2 repetitions.
    input_list1 = sorted(glob(os.path.join(root_path + '/multicoil_train' +'/*_T202.h5')))
    input_list2 = [path.replace('_T202.h5','_T201.h5') for path in input_list1]
    input_list3 = [path.replace('_T202.h5','_T203.h5') for path in input_list1]
    if DEBUG:
        input_list1 = input_list1[:2]
        input_list2 = input_list2[:2]
        input_list3 = input_list3[:2]

    T2_input_list = [input_list1, input_list2, input_list3]

    self.T1_input_list = T1_input_list
    self.T2_input_list = T2_input_list
    # [subjects, repetitions(3), slices(18), H, W]
    self.T1_images = np.zeros([len(input_list1),len(T1_input_list), 18, 256, 256])
    self.T2_images = np.zeros([len(input_list2),len(T2_input_list), 18, 256, 256])
    self.T2_masked_images = np.zeros([len(input_list2),len(T2_input_list), 18, 256, 256])

    """
    读取kspace network重建的图像
    """
    # (The bare string above: "load images reconstructed by the k-space
    # network".) Optionally preload those .npy reconstructions.
    # NOTE(review): these globs use root_path + 'multicoil_train' WITHOUT a
    # leading '/', unlike the .h5 globs above — likely relies on root_path
    # ending in '/'; verify.
    if kspace_refine == 'True':
        krecon_list1 = sorted(glob(os.path.join(root_path + 'multicoil_train' + '/*_T102_recon_kspace_round2_images.npy')))
        krecon_list2 = [path.replace('_T102','_T101') for path in krecon_list1]
        krecon_list3 = [path.replace('_T102','_T103') for path in krecon_list1]
        T1_krecon_list = [krecon_list1, krecon_list2, krecon_list3]

        krecon_list1 = sorted(glob(os.path.join(root_path + 'multicoil_train' + '/*_T202_recon_kspace_round2_images.npy')))
        krecon_list2 = [path.replace('_T202','_T201') for path in krecon_list1]
        krecon_list3 = [path.replace('_T202','_T203') for path in krecon_list1]
        T2_krecon_list = [krecon_list1, krecon_list2, krecon_list3]

        self.T1_krecon_list = T1_krecon_list
        self.T2_krecon_list = T2_krecon_list

        # Reconstructions are already center-cropped to 240x240.
        self.T1_krecon = np.zeros([len(input_list1), len(T1_krecon_list), 18, 240, 240]).astype(np.float32)
        self.T2_krecon = np.zeros([len(input_list2), len(T2_krecon_list), 18, 240, 240]).astype(np.float32)

    print('TrainSet loading...')
    # i indexes the repetition, j the subject file within that repetition;
    # arrays are filled as [subject][repetition].
    for i in tqdm(range(len(self.T1_input_list))):
        for j, path in enumerate(T1_input_list[i]):
            self.T1_images[j][i], _ = read_h5(path, use_kspace=use_kspace)
            # self.fname_slices[i].append(path) # each coil

        if kspace_refine == 'True':
            for k, path in enumerate(T1_krecon_list[i]):
                self.T1_krecon[k][i] = np.load(path).astype(np.float32)/255.0
    self.T1_labels = np.mean(self.T1_images, axis=1) # multi-coil mean

    for i in tqdm(range(len(self.T2_input_list))):
        for j, path in enumerate(T2_input_list[i]):
            self.T2_images[j][i], self.T2_masked_images[j][i] = read_h5(path, _MRIDOWN=MRIDOWN, use_kspace=use_kspace)
        if kspace_refine == 'True':
            for k, path in enumerate(T2_krecon_list[i]):
                self.T2_krecon[k][i] = np.load(path).astype(np.float32)/255.0

    # Label = mean over the three repetitions; inputs become the
    # undersampled T2 images.
    self.T2_labels = np.mean(self.T2_images, axis=1)
    self.T2_images = self.T2_masked_images

    print(f'Finish loading with time = {time.time() - start_time}s')

    # print("T1 image original shape:", self.T1_images.shape) # T1 image original shape: (128, 3, 18, 256, 256)
    # print("T2 image original shape:", self.T2_images.shape)

    N, _, S, H, W = self.T1_images.shape
    self.fname_slices = []

    # Record (subject, slice) identity for every flattened sample.
    for i in range(N):
        for j in range(S):
            self.fname_slices.append((i, j))

    # Flatten (subject, slice) into one axis and center-crop 256 -> 240.
    self.T1_images = self.T1_images.transpose(0,2,1,3,4).reshape(-1,len(T1_input_list),256,256)[:, :, 8:248, 8:248]
    self.T2_images = self.T2_images.transpose(0,2,1,3,4).reshape(-1,len(T2_input_list),256,256)[:, :, 8:248, 8:248]
    self.T1_labels = self.T1_labels.reshape(-1,1,256,256)[:, :, 8:248, 8:248]
    self.T2_labels = self.T2_labels.reshape(-1,1,256,256)[:, :, 8:248, 8:248]

    # Train data shape: (2304, 3, 240, 240)

    if kspace_refine == 'True':
        self.T1_krecon = self.T1_krecon.transpose(0,2,1,3,4).reshape(-1,len(T1_krecon_list),240,240)
        self.T2_krecon = self.T2_krecon.transpose(0,2,1,3,4).reshape(-1,len(T2_krecon_list),240,240)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
    def __len__(self):
        # One sample per (volume, slice) pair: __init__ flattened the volume
        # and slice axes into dim 0 of T1_images via transpose/reshape.
        return len(self.T1_images)
|
| 259 |
+
|
| 260 |
+
    def __getitem__(self, idx):
        """Return one training sample: paired T1 (reference) and T2 (target)
        slices in image and k-space form, instance-normalized and clamped
        to [-6, 6]."""
        T1_images = self.T1_images[idx] # lq_mri
        T2_images = self.T2_images[idx]
        T1_labels = self.T1_labels[idx] # gt_mri
        T2_labels = self.T2_labels[idx]

        fname = self.fname_slices[idx][0]
        slice = self.fname_slices[idx][1]

        ## Randomly pick one of the available repetitions as the input each time.
        choices = np.random.choice([i for i in range(len(self.T1_input_list))],1)
        T1_images = T1_images[choices]
        T2_images = T2_images[choices]

        # NOTE(review): mri_fft_m4raw appears to return
        # (sub k-space, sub image, full k-space, full image) — confirm
        # against its definition (not visible in this chunk).
        t1_kspace_in, t1_in, t1_kspace, t1_img = mri_fft_m4raw(T1_images, T1_labels)
        t2_kspace_in, t2_in, t2_kspace, t2_img = mri_fft_m4raw(T2_images, T2_labels)

        # Normalize the input with the statistics computed from the
        # fully-sampled image so both share one scale.
        t1_img, t1_mean, t1_std = normalize_instance(t1_img)
        t1_in = normalize(t1_in, t1_mean, t1_std)

        t2_img, t2_mean, t2_std = normalize_instance(t2_img)
        t2_in = normalize(t2_in, t2_mean, t2_std)

        # Clip outliers beyond +/-6 (post-normalization units).
        t1_img = torch.clamp(t1_img, -6, 6)
        t2_img = torch.clamp(t2_img, -6, 6)
        t1_in = torch.clamp(t1_in, -6, 6)
        t2_in = torch.clamp(t2_in, -6, 6)

        # 'ref_*' = T1 reference modality, 'tag_*' = T2 target modality;
        # '*_sub' = undersampled input, '*_full' = fully-sampled reference.
        sample = {
            'fname': fname,
            'slice': slice,

            'ref_kspace_full': t1_kspace,
            'ref_kspace_sub': t1_kspace_in,
            'ref_image_full': t1_img,
            'ref_image_sub': t1_in,
            't1_mean': t1_mean,
            't1_std': t1_std,

            'tag_kspace_full': t2_kspace,
            'tag_kspace_sub': t2_kspace_in,
            'tag_image_full': t2_img,
            'tag_image_sub': t2_in,
            't2_mean': t2_mean,
            't2_std': t2_std,

        }

        return sample
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
class M4Raw_TestSet(Dataset):
    """Validation split of the M4Raw dataset.

    Loads the three acquisition repetitions per subject for T1 and T2,
    averages repetitions into the label, undersamples T2 for the input,
    and serves per-slice samples via __getitem__.
    """

    def __init__(self, root_path, MRIDOWN, kspace_refine='False', use_kspace=False):

        self.kspace_refine = kspace_refine

        # Three repetitions per subject, discovered from the *_T102 files
        # and derived by filename substitution for *_T101 / *_T103.
        input_list1 = sorted(glob(os.path.join(root_path + '/multicoil_val' + '/*_T102.h5')))
        input_list2 = [path.replace('_T102.h5','_T101.h5') for path in input_list1]
        input_list3 = [path.replace('_T102.h5','_T103.h5') for path in input_list1]
        # NOTE(review): DEBUG is not a parameter here — presumably a
        # module-level flag; confirm it is defined at import time.
        if DEBUG:
            input_list1 = input_list1[:2]
            input_list2 = input_list2[:2]
            input_list3 = input_list3[:2]

        T1_input_list = [input_list1, input_list2, input_list3]

        input_list1 = sorted(glob(os.path.join(root_path + '/multicoil_val' + '/*_T202.h5')))
        input_list2 = [path.replace('_T202.h5','_T201.h5') for path in input_list1]
        input_list3 = [path.replace('_T202.h5','_T203.h5') for path in input_list1]
        if DEBUG:
            input_list1 = input_list1[:2]
            input_list2 = input_list2[:2]
            input_list3 = input_list3[:2]

        T2_input_list = [input_list1,input_list2,input_list3]

        self.T1_input_list = T1_input_list
        self.T2_input_list = T2_input_list
        # Shape: (num_subjects, num_repetitions, slices, H, W).
        self.T1_images = np.zeros([len(input_list1),len(T1_input_list), 18, 256, 256])
        self.T2_images = np.zeros([len(input_list2),len(T2_input_list), 18, 256, 256])
        self.T2_masked_images = np.zeros([len(input_list2),len(T2_input_list), 18, 256, 256])

        # Optionally load images reconstructed by the k-space network
        # (round-2 refinement inputs).
        """
        读取kspace network重建的图像
        """
        if kspace_refine == 'True':
            krecon_list1 = sorted(glob(os.path.join(root_path + 'multicoil_val' + '/*_T102_recon_kspace_round2_images.npy')))
            krecon_list2 = [path.replace('_T102','_T101') for path in krecon_list1]
            krecon_list3 = [path.replace('_T102','_T103') for path in krecon_list1]
            T1_krecon_list = [krecon_list1, krecon_list2, krecon_list3]

            krecon_list1 = sorted(glob(os.path.join(root_path + 'multicoil_val' + '/*_T202_recon_kspace_round2_images.npy')))
            krecon_list2 = [path.replace('_T202','_T201') for path in krecon_list1]
            krecon_list3 = [path.replace('_T202','_T203') for path in krecon_list1]
            T2_krecon_list = [krecon_list1, krecon_list2, krecon_list3]

            self.T1_krecon_list = T1_krecon_list
            self.T2_krecon_list = T2_krecon_list

            self.T1_krecon = np.zeros([len(input_list1), len(T1_krecon_list), 18, 240, 240]).astype(np.float32)
            self.T2_krecon = np.zeros([len(input_list2), len(T2_krecon_list), 18, 240, 240]).astype(np.float32)

        print('TestSet loading...')
        # T1 (reference modality) is loaded fully-sampled: the masked
        # output of read_h5 is discarded.
        for i in range(len(self.T1_input_list)):
            for j, path in enumerate(T1_input_list[i]):
                self.T1_images[j][i], _ = read_h5(path, use_kspace=use_kspace)

            if kspace_refine == 'True':
                for k, path in enumerate(T1_krecon_list[i]):
                    # .npy files store uint8 images; rescale to [0, 1].
                    self.T1_krecon[k][i] = np.load(path).astype(np.float32)/255.0
        # Label = mean over the repetition axis.
        self.T1_labels = np.mean(self.T1_images, axis=1)

        # T2 (target modality): keep both the full image (for the label)
        # and the undersampled image (for the input).
        for i in range(len(self.T2_input_list)):
            for j, path in enumerate(T2_input_list[i]):
                self.T2_images[j][i], self.T2_masked_images[j][i] = read_h5(path, _MRIDOWN = MRIDOWN, use_kspace=use_kspace)

            if kspace_refine == 'True':
                for k, path in enumerate(T2_krecon_list[i]):
                    self.T2_krecon[k][i] = np.load(path).astype(np.float32)/255.0
        self.T2_labels = np.mean(self.T2_images, axis=1)
        # From here on T2_images holds the undersampled inputs.
        self.T2_images = self.T2_masked_images

        print('Finish loading')
        N, _, S, H, W = self.T1_images.shape
        self.fname_slices = []

        # Index table: dataset item idx -> (volume index, slice index).
        for i in range(N):
            for j in range(S):
                self.fname_slices.append((i, j))

        # Flatten (volume, slice) into dim 0 and center-crop 256 -> 240.
        self.T1_images = self.T1_images.transpose(0,2,1,3,4).reshape(-1,len(T1_input_list),256,256)[:, :, 8:248, 8:248]
        self.T2_images = self.T2_images.transpose(0,2,1,3,4).reshape(-1,len(T2_input_list),256,256)[:, :, 8:248, 8:248]
        self.T1_labels = self.T1_labels.reshape(-1,1,256,256)[:, :, 8:248, 8:248]
        self.T2_labels = self.T2_labels.reshape(-1,1,256,256)[:, :, 8:248, 8:248]
        print("Test data shape:", self.T1_images.shape)

        if kspace_refine == 'True':
            self.T1_krecon = self.T1_krecon.transpose(0,2,1,3,4).reshape(-1,len(T1_krecon_list),240,240)
            self.T2_krecon = self.T2_krecon.transpose(0,2,1,3,4).reshape(-1,len(T2_krecon_list),240,240)

    def __len__(self):
        # One sample per (volume, slice) pair after the flattening above.
        return len(self.T1_images)

    def __getitem__(self, idx):
        """Return one test sample using the first repetition as input."""

        T1_images = self.T1_images[idx]
        T2_images = self.T2_images[idx]

        T1_labels = self.T1_labels[idx]
        T2_labels = self.T2_labels[idx]

        # Always use the first repetition as the input image at test time
        # (deterministic, unlike the training set's random choice).
        choices = np.random.choice([0],1)
        T1_images = T1_images[choices]
        T2_images = T2_images[choices]

        # NOTE(review): mri_fft_m4raw appears to return
        # (sub k-space, sub image, full k-space, full image) — confirm.
        t1_kspace_in, t1_in, t1_kspace, t1_img = mri_fft_m4raw(T1_images, T1_labels)
        t2_kspace_in, t2_in, t2_kspace, t2_img = mri_fft_m4raw(T2_images, T2_labels)

        fname = self.fname_slices[idx][0]
        slice = self.fname_slices[idx][1]

        # Normalize the input with the fully-sampled image's statistics.
        t1_img, t1_mean, t1_std = normalize_instance(t1_img)
        t1_in = normalize(t1_in, t1_mean, t1_std)

        t2_img, t2_mean, t2_std = normalize_instance(t2_img)
        t2_in = normalize(t2_in, t2_mean, t2_std)

        # Clip outliers beyond +/-6 (post-normalization units).
        t1_img = torch.clamp(t1_img, -6, 6)
        t2_img = torch.clamp(t2_img, -6, 6)
        t1_in = torch.clamp(t1_in, -6, 6)
        t2_in = torch.clamp(t2_in, -6, 6)

        # 'ref_*' = T1 reference modality, 'tag_*' = T2 target modality.
        sample = {
            'fname': fname,
            'slice': slice,

            'ref_kspace_full': t1_kspace,
            'ref_kspace_sub': t1_kspace_in,
            'ref_image_full': t1_img,
            'ref_image_sub': t1_in,
            't1_mean': t1_mean,
            't1_std': t1_std,

            'tag_kspace_full': t2_kspace,
            'tag_kspace_sub': t2_kspace_in,
            'tag_image_full': t2_img,
            'tag_image_sub': t2_in,
            't2_mean': t2_mean,
            't2_std': t2_std,

        }

        return sample
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
def compute_metrics(image, labels):
    """Score *image* against the reference *labels*.

    Args:
        image: reconstructed / input image (array-like).
        labels: ground-truth reference image of the same shape.

    Returns:
        tuple: (NMSE, PSNR, SSIM). The first value is the mean squared
        error normalized by the variance of the reference, matching the
        "NMSE" naming used by the callers.
    """
    nmse = mean_squared_error(labels, image) / np.var(labels)
    psnr = peak_signal_noise_ratio(labels, image)
    ssim = structural_similarity(labels, image)
    return nmse, psnr, ssim
|
| 508 |
+
|
| 509 |
+
def complex_abs_eval(data):
    """Magnitude of a 2-channel complex tensor laid out as (2, H, W).

    Channel 0 is the real part and channel 1 the imaginary part; the
    result keeps a leading singleton channel, i.e. shape (1, H, W).
    """
    real_sq = data[0:1, :, :] ** 2
    imag_sq = data[1:2, :, :] ** 2
    return (real_sq + imag_sq).sqrt()
|
| 511 |
+
|
| 512 |
+
|
| 513 |
+
if __name__ == "__main__":
    # Smoke-test the validation loader: compute NMSE / PSNR / SSIM between
    # the undersampled inputs and the fully-sampled references.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_path', type=str, default='/data/qic99/MRI_recon/')
    parser.add_argument('--MRIDOWN', type=str, default='4X', help='MRI down-sampling rate')
    parser.add_argument('--kspace_refine', type=str, default='False',
                        help='whether use the image reconstructed from kspace network.')
    args = parser.parse_args()

    # BUG FIX: M4Raw_TestSet.__init__ takes (root_path, MRIDOWN, ...), not the
    # argparse namespace itself; passing `args` bound the Namespace to root_path.
    db_test = M4Raw_TestSet(args.root_path, args.MRIDOWN, kspace_refine=args.kspace_refine)
    testloader = DataLoader(db_test, batch_size=4, shuffle=False, num_workers=4, pin_memory=True)

    t1_MSE_all, t1_PSNR_all, t1_SSIM_all = [], [], []
    t2_MSE_all, t2_PSNR_all, t2_SSIM_all = [], [], []

    save_dir = "./visualize_images/"

    for i_batch, sampled_batch in enumerate(testloader):
        t1_in, t2_in = sampled_batch['ref_image_sub'].cuda(), sampled_batch['tag_image_sub'].cuda()
        t1, t2 = sampled_batch['ref_image_full'].cuda(), sampled_batch['tag_image_full'].cuda()

        for j in range(t1_in.shape[0]):
            # Quantize to uint8 in [0, 255] before computing image metrics.
            t1_in_img = (np.clip(t1_in[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
            t1_img = (np.clip(t1[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
            t2_in_img = (np.clip(t2_in[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
            t2_img = (np.clip(t2[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)

            t1_MSE, t1_PSNR, t1_SSIM = compute_metrics(t1_in_img, t1_img)
            t2_MSE, t2_PSNR, t2_SSIM = compute_metrics(t2_in_img, t2_img)

            t1_MSE_all.append(t1_MSE)
            t1_PSNR_all.append(t1_PSNR)
            t1_SSIM_all.append(t1_SSIM)

            t2_MSE_all.append(t2_MSE)
            t2_PSNR_all.append(t2_PSNR)
            t2_SSIM_all.append(t2_SSIM)

    print("t1_PSNR:", round(np.array(t1_PSNR_all).mean(), 4))
    print("t1_NMSE:", round(np.array(t1_MSE_all).mean(), 4))
    print("t1_SSIM:", round(np.array(t1_SSIM_all).mean(), 4))

    print("t2_PSNR:", round(np.array(t2_PSNR_all).mean(), 4))
    print("t2_NMSE:", round(np.array(t2_MSE_all).mean(), 4))
    print("t2_SSIM:", round(np.array(t2_SSIM_all).mean(), 4))
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/m4raw_std_dataloader.py
ADDED
|
@@ -0,0 +1,583 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from __future__ import print_function, division
|
| 3 |
+
from typing import Dict, NamedTuple, Optional, Sequence, Tuple, Union
|
| 4 |
+
import sys
|
| 5 |
+
sys.path.append('.')
|
| 6 |
+
from glob import glob
|
| 7 |
+
import os
|
| 8 |
+
os.environ['OPENBLAS_NUM_THREADS'] = '1'
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
from torch.utils.data import Dataset
|
| 12 |
+
|
| 13 |
+
import h5py
|
| 14 |
+
from matplotlib import pyplot as plt
|
| 15 |
+
from dataloaders.m4_utils import ifft2c, fft2c, complex_abs
|
| 16 |
+
from dataloaders.kspace_subsample import create_mask_for_mask_type
|
| 17 |
+
|
| 18 |
+
from .albu_transform import get_albu_transforms
|
| 19 |
+
|
| 20 |
+
import argparse, time
|
| 21 |
+
from torch.utils.data import DataLoader
|
| 22 |
+
from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
|
| 23 |
+
|
| 24 |
+
def normalize(data, mean, stddev, eps=0.0):
    """Standardize *data* with the given statistics.

    Computes ``(data - mean) / (stddev + eps)``.

    Args:
        data (torch.Tensor): Tensor to standardize.
        mean (float): Mean subtracted from the data.
        stddev (float): Standard deviation used as the divisor.
        eps (float, default=0.0): Stabilizer added to ``stddev`` so the
            division never hits zero.

    Returns:
        torch.Tensor: The standardized tensor.
    """
    centered = data - mean
    return centered / (stddev + eps)
|
| 40 |
+
|
| 41 |
+
def normalize_instance_dim(data, eps=0.0):
    """Instance-normalize a batched tensor per sample.

    Mean and std are computed over the (C, H, W) dims of each sample and
    ``(data - mean) / (std + eps)`` is applied.

    Args:
        data (torch.Tensor): Batched tensor of shape (B, C, H, W).
        eps (float): Stabilizer added to the std before dividing.

    Returns:
        tuple: (normalized tensor, per-sample mean, per-sample std).
    """
    reduce_dims = (1, 2, 3)
    mu = data.mean(dim=reduce_dims, keepdim=True)
    sigma = data.std(dim=reduce_dims, keepdim=True)
    return (data - mu) / (sigma + eps), mu, sigma
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def normalize_instance(data, eps=0.0):
    """Instance-normalize *data* using its own global mean and std.

    Applies ``(data - mean) / (std + eps)`` where both statistics are
    computed over the whole tensor.

    Args:
        data (torch.Tensor): Tensor to normalize.
        eps (float): Stabilizer added to the std before dividing.

    Returns:
        tuple: (normalized tensor, mean, std).
    """
    mu = data.mean()
    sigma = data.std()
    return (data - mu) / (sigma + eps), mu, sigma
|
| 79 |
+
|
| 80 |
+
def undersample_mri(kspace, _MRIDOWN):
    """Retrospectively undersample multi-coil k-space along the phase-encode axis.

    Args:
        kspace: k-space tensor whose last three dims are (H, W, 2); the
            observed caller shape is [slices, coils, 256, 256, 2].
        _MRIDOWN: acceleration tag, one of "4X", "8X" or "12X".

    Returns:
        tuple: (masked_kspace, mask) where mask has a trailing singleton dim.

    Raises:
        ValueError: if ``_MRIDOWN`` is not a recognized acceleration tag.
    """
    if _MRIDOWN == "4X":
        mask_type_str, center_fraction, MRIDOWN = "random", 0.1, 4
    elif _MRIDOWN == "8X":
        mask_type_str, center_fraction, MRIDOWN = "equispaced", 0.04, 8
    elif _MRIDOWN == "12X":
        mask_type_str, center_fraction, MRIDOWN = "equispaced", 0.03, 12
    else:
        # Previously an unknown tag fell through and raised a confusing
        # NameError on mask_type_str below; fail fast with a clear message.
        raise ValueError(f"Unsupported MRI down-sampling rate: {_MRIDOWN!r}")

    ff = create_mask_for_mask_type(mask_type_str, [center_fraction], [MRIDOWN])

    shape = [256, 256, 1]
    mask = ff(shape, seed=1337)  # fixed seed -> same mask every call; [1, 256, 1]

    mask = mask[:, :, 0]  # [1, 256]

    # Broadcast over the leading (slice, coil) dims and the trailing
    # real/imag dim.
    masked_kspace = kspace * mask[None, None, :, :, None]

    return masked_kspace, mask.unsqueeze(-1)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def apply_mask(data, mask_func, seed=None, padding=None):
    """Subsample k-space by elementwise multiplication with a sampling mask.

    Args:
        data (torch.Tensor): k-space data; dims -3 and -2 are spatial and
            the final dim has size 2 (real/imag).
        mask_func (callable): maps (shape, seed) -> mask tensor.
        seed (int or 1-d array_like, optional): RNG seed forwarded to
            ``mask_func``.
        padding (sequence, optional): (left, right) phase-encode columns
            to force to zero; the right bound is exclusive of kept columns.

    Returns:
        (tuple): tuple containing:
            masked data (torch.Tensor): Subsampled k-space data
            mask (torch.Tensor): The generated mask
    """
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1  # broadcast over every leading (batch/coil) dim
    mask = mask_func(mask_shape, seed)
    if padding is not None:
        left, right = padding[0], padding[1]
        mask[:, :, :left] = 0
        mask[:, :, right:] = 0  # padding value inclusive on right of zeros

    # "+ 0.0" removes the sign of any negative zeros from the product.
    masked = data * mask + 0.0
    return masked, mask
|
| 128 |
+
|
| 129 |
+
def complex_center_crop(data, shape):
    """Center-crop a complex image (or batch of them) to ``shape``.

    The crop acts on dims -3 and -2; the final dim (size 2, real/imag)
    is left untouched.

    Args:
        data (torch.Tensor): complex tensor with at least 3 dims and a
            final dim of size 2.
        shape (sequence of int): target (height, width); each entry must
            be positive and no larger than the matching input dim.

    Returns:
        torch.Tensor: the center-cropped tensor.
    """
    target_h, target_w = shape[0], shape[1]
    assert 0 < target_h <= data.shape[-3]
    assert 0 < target_w <= data.shape[-2]

    top = (data.shape[-3] - target_h) // 2
    left = (data.shape[-2] - target_w) // 2
    return data[..., top:top + target_h, left:left + target_w, :]
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def to_tensor(data):
    """Convert a numpy array to a PyTorch tensor.

    Complex arrays are split into real and imaginary parts stacked along
    a new last dimension.

    Args:
        data (np.array): Input numpy array, real or complex.

    Returns:
        torch.Tensor: PyTorch version of data.
    """
    if not np.iscomplexobj(data):
        return torch.from_numpy(data)
    stacked = np.stack((data.real, data.imag), axis=-1)
    return torch.from_numpy(stacked)
|
| 173 |
+
|
| 174 |
+
def rss(data, dim=0):
    """Compute the Root Sum of Squares (RSS) along ``dim``.

    RSS is computed assuming that ``dim`` is the coil dimension.

    Args:
        data (torch.Tensor): The input tensor.
        dim (int): The dimension along which to apply the RSS transform.

    Returns:
        torch.Tensor: The RSS value.
    """
    squared_total = data.pow(2).sum(dim)
    return squared_total.sqrt()
|
| 188 |
+
|
| 189 |
+
def read_h5(file_name, _MRIDOWN, use_kspace):
    """Load one M4Raw multi-coil volume and build (input, target) stacks.

    The fully-sampled target is reconstructed via inverse FFT, center crop,
    magnitude, and coil RSS. Unless ``use_kspace`` is set, the input is a
    retrospectively undersampled reconstruction; each input slice is
    instance-normalized (the same per-slice statistics are applied to the
    target) and clamped to [-6, 6].

    Args:
        file_name: path to an HDF5 file with a 'kspace' dataset.
        _MRIDOWN: acceleration tag forwarded to undersample_mri.
        use_kspace: if True, skip undersampling and use the target as input.

    Returns:
        tuple: (lq_images, targets, means, stds) — stacked torch tensors,
        one entry per slice.
    """
    crop_size = [240, 240]

    # Context manager closes the HDF5 handle even on error; the original
    # left the file open. Read mode is explicit.
    with h5py.File(file_name, 'r') as hf:
        volume_kspace = hf['kspace'][()]

    slice_kspace = to_tensor(volume_kspace)

    # Fully-sampled reference: iFFT -> center crop -> magnitude -> coil RSS.
    target = ifft2c(slice_kspace)
    target = complex_center_crop(target, crop_size)
    target = complex_abs(target)
    target = rss(target, dim=1)

    if not use_kspace:
        # Retrospectively undersample, then reconstruct the low-quality input.
        masked_kspace, mask = undersample_mri(slice_kspace, _MRIDOWN)
        lq_image = ifft2c(masked_kspace)
        lq_image = complex_center_crop(lq_image, crop_size)
        lq_image = complex_abs(lq_image)
        lq_image = rss(lq_image, dim=1)
    else:
        lq_image = target

    # Per-slice instance normalization of the input; keep the statistics
    # so the target can be normalized with the *same* mean/std.
    lq_image_list = []
    mean_list = []
    std_list = []
    for i in range(lq_image.shape[0]):
        image, mean, std = normalize_instance(lq_image[i], eps=1e-11)
        lq_image_list.append(image.clamp(-6, 6))
        mean_list.append(mean)
        std_list.append(std)

    target_list = []
    for i in range(target.shape[0]):
        target_slice = normalize(target[i], mean_list[i], std_list[i], eps=1e-11)
        target_list.append(target_slice.clamp(-6, 6))

    return torch.stack(lq_image_list), torch.stack(target_list), torch.stack(mean_list), torch.stack(std_list)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class M4Raw_TrainSet(Dataset):
    """Training split of the M4Raw dataset (std-normalized variant).

    Loads three acquisition repetitions per subject for T1 and T2 via
    read_h5 (which normalizes and pre-crops to 240x240), averages
    repetitions into the label, and serves augmented per-slice samples.
    """

    def __init__(self, args, use_kspace=False, DEBUG=False):
        # NOTE(review): mask_func is built but never used in this
        # constructor — possibly leftover from an earlier version.
        mask_func = create_mask_for_mask_type(
            args.MASKTYPE, args.CENTER_FRACTIONS, args.ACCELERATIONS,
        )
        start_time = time.time()

        self.albu_transforms = get_albu_transforms("train", (240, 240))

        self._MRIDOWN = args.MRIDOWN
        self.input_normalize = args.input_normalize
        # Three repetitions per subject: *_T101/*_T102/*_T103 derived from
        # the discovered *_T102 filenames by substitution.
        input_list1 = sorted(glob(os.path.join(args.root_path, 'multicoil_train', '*_T102.h5')))
        input_list2 = [path.replace('_T102.h5','_T101.h5') for path in input_list1]
        input_list3 = [path.replace('_T102.h5','_T103.h5') for path in input_list1]
        if DEBUG:
            input_list1 = input_list1[:2]
            input_list2 = input_list2[:2]
            input_list3 = input_list3[:2]

        T1_input_list = [input_list1, input_list2, input_list3]

        input_list1 = sorted(glob(os.path.join(args.root_path, 'multicoil_train', '*_T202.h5')))
        input_list2 = [path.replace('_T202.h5','_T201.h5') for path in input_list1]
        input_list3 = [path.replace('_T202.h5','_T203.h5') for path in input_list1]
        if DEBUG:
            input_list1 = input_list1[:2]
            input_list2 = input_list2[:2]
            input_list3 = input_list3[:2]

        T2_input_list = [input_list1, input_list2, input_list3]

        self.T1_input_list = T1_input_list
        self.T2_input_list = T2_input_list
        # Shape: (num_subjects, num_repetitions, slices, H, W) — read_h5
        # already cropped to 240x240.
        self.T1_images = np.zeros([len(input_list1),len(T1_input_list), 18, 240, 240])
        self.T2_images = np.zeros([len(input_list2),len(T2_input_list), 18, 240, 240])
        self.T2_masked_images = np.zeros([len(input_list2),len(T2_input_list), 18, 240, 240])
        # Per-slice normalization statistics returned by read_h5.
        self.T2_mean = np.zeros([len(input_list2),len(T2_input_list), 18])
        self.T2_std = np.zeros([len(input_list2),len(T2_input_list), 18])

        print('TrainSet loading...')
        # T1 (reference modality): keep only the fully-sampled target
        # (second value returned by read_h5); the lq input and stats are
        # discarded.
        for i in range(len(self.T1_input_list)):
            for j, path in enumerate(T1_input_list[i]):
                _, self.T1_images[j][i], _, _ = read_h5(path, self._MRIDOWN, use_kspace=use_kspace)

        # Label = mean over the repetition axis.
        self.T1_labels = np.mean(self.T1_images, axis=1)

        # T2 (target modality): keep the undersampled input, the full
        # image, and the per-slice mean/std used to normalize them.
        for i in range(len(self.T2_input_list)):
            for j, path in enumerate(T2_input_list[i]):
                self.T2_masked_images[j][i], self.T2_images[j][i], self.T2_mean[j][i], self.T2_std[j][i] = read_h5(path, self._MRIDOWN, use_kspace=use_kspace)
                # lq_image_list, target_list

        self.T2_labels = np.mean(self.T2_images, axis=1)
        # From here on T2_images holds the undersampled inputs.
        self.T2_images = self.T2_masked_images

        print(f'Finish loading with time = {time.time() - start_time}s')

        N, _, S, H, W = self.T1_images.shape
        self.fname_slices = []

        # Index table: dataset item idx -> (volume index, slice index).
        for i in range(N):
            for j in range(S):
                self.fname_slices.append((i, j))

        # Flatten (volume, slice) into dim 0, keeping repetitions in dim 1.
        self.T1_images = self.T1_images.transpose(0,2,1,3,4).reshape(-1,len(T1_input_list),240,240)
        self.T2_images = self.T2_images.transpose(0,2,1,3,4).reshape(-1,len(T2_input_list),240,240)
        self.T1_labels = self.T1_labels.reshape(-1,1,240,240)
        self.T2_labels = self.T2_labels.reshape(-1,1,240,240)
        # NOTE(review): unlike the image arrays above, T2_mean/T2_std are
        # reshaped without a transpose(0,2,1) first, so their (sample,
        # repetition) pairing may not match T2_images after flattening —
        # verify before trusting t2_mean/t2_std in __getitem__.
        self.T2_mean = self.T2_mean.reshape(-1,3)
        self.T2_std = self.T2_std.reshape(-1,3)

        print("Train data shape:", self.T1_images.shape)

    def __len__(self):
        # One sample per (volume, slice) pair after the flattening above.
        return len(self.T1_images)

    def __getitem__(self, idx):
        """Return one augmented training sample (a randomly chosen
        repetition as input plus the repetition-averaged labels)."""
        T1_images = self.T1_images[idx]
        T2_images = self.T2_images[idx]
        T1_labels = self.T1_labels[idx]
        T2_labels = self.T2_labels[idx]

        fname = self.fname_slices[idx][0]
        slice = self.fname_slices[idx][1]

        # Randomly pick one of the three repetitions as the input each time.
        choices = np.random.choice([i for i in range(len(self.T1_input_list))],1)
        T1_images = T1_images[choices]
        T2_images = T2_images[choices]
        t2_mean = self.T2_mean[idx][choices]
        t2_std = self.T2_std[idx][choices]

        # Apply the same albumentations transform to all four (1, 240, 240)
        # images so input and label stay aligned.
        sample = self.albu_transforms(image=T1_images[0], image2=T2_images[0],
                                      image3=T1_labels[0], image4=T2_labels[0])

        # Restore the leading channel dimension dropped for augmentation.
        t1_in = np.expand_dims(sample['image'], 0)
        t2_in = np.expand_dims(sample['image2'], 0)
        t1 = np.expand_dims(sample['image3'], 0)
        t2 = np.expand_dims(sample['image4'], 0)

        sample = {
            'fname': fname,
            'slice': slice,

            't1_in': t1_in.astype(np.float32),
            't1': t1.astype(np.float32),
            "t2_mean": t2_mean, "t2_std": t2_std,

            't2_in': t2_in.astype(np.float32),
            't2': t2.astype(np.float32)}

        return sample #, sample_stats
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
class M4Raw_TestSet(Dataset):
|
| 375 |
+
def __init__(self, args, use_kspace=False, DEBUG=False):
|
| 376 |
+
mask_func = create_mask_for_mask_type(
|
| 377 |
+
args.MASKTYPE, args.CENTER_FRACTIONS, args.ACCELERATIONS,
|
| 378 |
+
)
|
| 379 |
+
self.use_kspace = use_kspace
|
| 380 |
+
self._MRIDOWN = args.MRIDOWN
|
| 381 |
+
|
| 382 |
+
self.input_normalize = args.input_normalize
|
| 383 |
+
input_list1 = sorted(glob(os.path.join(args.root_path, 'multicoil_val' + '*_T102.h5')))
|
| 384 |
+
input_list2 = [path.replace('_T102.h5','_T101.h5') for path in input_list1]
|
| 385 |
+
input_list3 = [path.replace('_T102.h5','_T103.h5') for path in input_list1]
|
| 386 |
+
if DEBUG:
|
| 387 |
+
input_list1 = input_list1[:2]
|
| 388 |
+
input_list2 = input_list2[:2]
|
| 389 |
+
input_list3 = input_list3[:2]
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
T1_input_list = [input_list1, input_list2, input_list3]
|
| 393 |
+
|
| 394 |
+
input_list1 = sorted(glob(os.path.join(args.root_path, 'multicoil_val', '*_T202.h5')))
|
| 395 |
+
input_list2 = [path.replace('_T202.h5','_T201.h5') for path in input_list1]
|
| 396 |
+
input_list3 = [path.replace('_T202.h5','_T203.h5') for path in input_list1]
|
| 397 |
+
if DEBUG:
|
| 398 |
+
input_list1 = input_list1[:2]
|
| 399 |
+
input_list2 = input_list2[:2]
|
| 400 |
+
input_list3 = input_list3[:2]
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
T2_input_list = [input_list1,input_list2,input_list3]
|
| 404 |
+
|
| 405 |
+
self.T1_input_list = T1_input_list
|
| 406 |
+
self.T2_input_list = T2_input_list
|
| 407 |
+
self.T1_images = np.zeros([len(input_list1),len(T1_input_list), 18, 240, 240])
|
| 408 |
+
self.T2_images = np.zeros([len(input_list2),len(T2_input_list), 18, 240, 240])
|
| 409 |
+
self.T2_masked_images = np.zeros([len(input_list2),len(T2_input_list), 18, 240, 240])
|
| 410 |
+
self.T2_mean = np.zeros([len(input_list2),len(T2_input_list), 18])
|
| 411 |
+
self.T2_std = np.zeros([len(input_list2),len(T2_input_list), 18])
|
| 412 |
+
print('TestSet loading...')
|
| 413 |
+
|
| 414 |
+
for i in range(len(self.T1_input_list)):
|
| 415 |
+
for j, path in enumerate(T1_input_list[i]):
|
| 416 |
+
_, self.T1_images[j][i], _, _ = read_h5(path, self._MRIDOWN, use_kspace=use_kspace)
|
| 417 |
+
|
| 418 |
+
self.T1_labels = np.mean(self.T1_images, axis=1)
|
| 419 |
+
|
| 420 |
+
for i in range(len(self.T2_input_list)):
|
| 421 |
+
for j, path in enumerate(T2_input_list[i]):
|
| 422 |
+
self.T2_masked_images[j][i], self.T2_images[j][i], self.T2_mean[j][i], self.T2_std[j][i] = read_h5(path,
|
| 423 |
+
self._MRIDOWN,
|
| 424 |
+
use_kspace=use_kspace)
|
| 425 |
+
# lq_image_list, target_list
|
| 426 |
+
|
| 427 |
+
# self.T2_labels = np.mean(self.T2_images, axis=1) # TODO
|
| 428 |
+
self.T2_labels = np.mean(self.T2_images, axis=1)
|
| 429 |
+
self.T2_images = self.T2_masked_images
|
| 430 |
+
|
| 431 |
+
print('Finish loading')
|
| 432 |
+
N, _, S, H, W = self.T1_images.shape
|
| 433 |
+
self.fname_slices = []
|
| 434 |
+
|
| 435 |
+
for i in range(N):
|
| 436 |
+
for j in range(S):
|
| 437 |
+
self.fname_slices.append((i, j))
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
self.T1_images = self.T1_images.transpose(0,2,1,3,4).reshape(-1,len(T1_input_list),240,240)
|
| 441 |
+
self.T2_images = self.T2_images.transpose(0,2,1,3,4).reshape(-1,len(T2_input_list),240,240)
|
| 442 |
+
self.T1_labels = self.T1_labels.reshape(-1,1,240,240)
|
| 443 |
+
self.T2_labels = self.T2_labels.reshape(-1,1,240,240)
|
| 444 |
+
self.T2_mean = self.T2_mean.reshape(-1,3)
|
| 445 |
+
self.T2_std = self.T2_std.reshape(-1,3)
|
| 446 |
+
print("Train data shape:", self.T1_images.shape)
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def __len__(self):
|
| 450 |
+
return len(self.T1_images)
|
| 451 |
+
|
| 452 |
+
def __getitem__(self, idx):
|
| 453 |
+
|
| 454 |
+
T1_images = self.T1_images[idx]
|
| 455 |
+
T2_images = self.T2_images[idx]
|
| 456 |
+
T1_labels = self.T1_labels[idx]
|
| 457 |
+
T2_labels = self.T2_labels[idx]
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
fname = self.fname_slices[idx][0]
|
| 461 |
+
slice = self.fname_slices[idx][1]
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
|
| 465 |
+
# choices = np.random.choice([i for i in range(len(self.T1_input_list))],1) ## 每次都是从三个repetition中选择一个作为input.
|
| 466 |
+
choices = np.random.choice([0],1) ## 用第一个repetition作为输入图像进行测试
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
T1_images = T1_images[choices]
|
| 470 |
+
T2_images = T2_images[choices]
|
| 471 |
+
|
| 472 |
+
t2_mean = self.T2_mean[idx][choices]
|
| 473 |
+
t2_std = self.T2_std[idx][choices]
|
| 474 |
+
|
| 475 |
+
|
| 476 |
+
# breakpoint()
|
| 477 |
+
# import imageio as io
|
| 478 |
+
# io.imsave('T1_images.png', (T1_images[0]*255).astype(np.uint8))
|
| 479 |
+
# io.imsave('T1_labels.png', (T1_labels[0]*255).astype(np.uint8))
|
| 480 |
+
# io.imsave('T2_images.png', (T2_images[0]*255).astype(np.uint8))
|
| 481 |
+
# io.imsave('T2_labels.png', (T2_labels[0]*255).astype(np.uint8))
|
| 482 |
+
# breakpoint()
|
| 483 |
+
|
| 484 |
+
t1_in = T1_images
|
| 485 |
+
t1 = T1_labels
|
| 486 |
+
t2_in = T2_images
|
| 487 |
+
t2 = T2_labels
|
| 488 |
+
|
| 489 |
+
# sample_stats = {"t2_mean": t2_mean, "t2_std": t2_std}
|
| 490 |
+
|
| 491 |
+
# print("Test t1_in shape:", t1_in.shape, "t1 shape:", t1.shape, "t2_in shape:", t2_in.shape, "t2 shape:", t2.shape)
|
| 492 |
+
|
| 493 |
+
# breakpoint()
|
| 494 |
+
sample = {
|
| 495 |
+
'fname': fname,
|
| 496 |
+
'slice': slice,
|
| 497 |
+
|
| 498 |
+
't1_in': t1_in.astype(np.float32),
|
| 499 |
+
't1': t1.astype(np.float32),
|
| 500 |
+
"t2_mean": t2_mean, "t2_std": t2_std,
|
| 501 |
+
|
| 502 |
+
't2_in': t2_in.astype(np.float32),
|
| 503 |
+
't2': t2.astype(np.float32)}
|
| 504 |
+
|
| 505 |
+
return sample #, sample_stats
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
def compute_metrics(image, labels):
|
| 510 |
+
MSE = mean_squared_error(labels, image)/np.var(labels)
|
| 511 |
+
PSNR = peak_signal_noise_ratio(labels, image)
|
| 512 |
+
SSIM = structural_similarity(labels, image)
|
| 513 |
+
|
| 514 |
+
# print("metrics:", MSE, PSNR, SSIM)
|
| 515 |
+
|
| 516 |
+
return MSE, PSNR, SSIM
|
| 517 |
+
|
| 518 |
+
def complex_abs_eval(data):
|
| 519 |
+
return (data[0:1, :, :] ** 2 + data[1:2, :, :] ** 2).sqrt()
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
if __name__ == "__main__":
|
| 523 |
+
parser = argparse.ArgumentParser()
|
| 524 |
+
parser.add_argument('--root_path', type=str, default='/data/qic99/MRI_recon/')
|
| 525 |
+
parser.add_argument('--MRIDOWN', type=str, default='4X', help='MRI down-sampling rate')
|
| 526 |
+
parser.add_argument('--kspace_refine', type=str, default='False', help='whether use the image reconstructed from kspace network.')
|
| 527 |
+
args = parser.parse_args()
|
| 528 |
+
|
| 529 |
+
db_test = M4Raw_TestSet(args)
|
| 530 |
+
testloader = DataLoader(db_test, batch_size=4, shuffle=False, num_workers=4, pin_memory=True)
|
| 531 |
+
|
| 532 |
+
t1_MSE_all, t1_PSNR_all, t1_SSIM_all = [], [], []
|
| 533 |
+
t2_MSE_all, t2_PSNR_all, t2_SSIM_all = [], [], []
|
| 534 |
+
|
| 535 |
+
save_dir = "./visualize_images/"
|
| 536 |
+
|
| 537 |
+
for i_batch, sampled_batch in enumerate(testloader):
|
| 538 |
+
# t1_in, t2_in = sampled_batch['t1_in'].cuda(), sampled_batch['t2_in'].cuda()
|
| 539 |
+
# t1, t2 = sampled_batch['t1_labels'].cuda(), sampled_batch['t2_labels'].cuda()
|
| 540 |
+
|
| 541 |
+
t1_in, t2_in = sampled_batch['ref_image_sub'].cuda(), sampled_batch['tag_image_sub'].cuda()
|
| 542 |
+
t1, t2 = sampled_batch['ref_image_full'].cuda(), sampled_batch['tag_image_full'].cuda()
|
| 543 |
+
|
| 544 |
+
# breakpoint()
|
| 545 |
+
for j in range(t1_in.shape[0]):
|
| 546 |
+
# t1_in_img = (np.clip(complex_abs_eval(t1_in[j])[0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 547 |
+
# t1_img = (np.clip(complex_abs_eval(t1[j])[0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 548 |
+
# t2_in_img = (np.clip(complex_abs_eval(t2_in[j])[0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 549 |
+
# t2_img = (np.clip(complex_abs_eval(t2[j])[0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 550 |
+
# breakpoint()
|
| 551 |
+
t1_in_img = (np.clip(t1_in[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 552 |
+
t1_img = (np.clip(t1[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 553 |
+
t2_in_img = (np.clip(t2_in[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 554 |
+
t2_img = (np.clip(t2[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 555 |
+
|
| 556 |
+
# t1_in_img = (np.clip(t1_in[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 557 |
+
# t1_img = (np.clip(t1[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 558 |
+
# t2_in_img = (np.clip(t2_in[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 559 |
+
# t2_img = (np.clip(t2[j][0].cpu().numpy(), 0, 1) * 255).astype(np.uint8)
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
# print(t1_in_img.shape, t1_img.shape)
|
| 563 |
+
t1_MSE, t1_PSNR, t1_SSIM = compute_metrics(t1_in_img, t1_img)
|
| 564 |
+
t2_MSE, t2_PSNR, t2_SSIM = compute_metrics(t2_in_img, t2_img)
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
t1_MSE_all.append(t1_MSE)
|
| 568 |
+
t1_PSNR_all.append(t1_PSNR)
|
| 569 |
+
t1_SSIM_all.append(t1_SSIM)
|
| 570 |
+
|
| 571 |
+
t2_MSE_all.append(t2_MSE)
|
| 572 |
+
t2_PSNR_all.append(t2_PSNR)
|
| 573 |
+
t2_SSIM_all.append(t2_SSIM)
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
# print("t1_PSNR:", t1_PSNR_all)
|
| 577 |
+
print("t1_PSNR:", round(np.array(t1_PSNR_all).mean(), 4))
|
| 578 |
+
print("t1_NMSE:", round(np.array(t1_MSE_all).mean(), 4))
|
| 579 |
+
print("t1_SSIM:", round(np.array(t1_SSIM_all).mean(), 4))
|
| 580 |
+
|
| 581 |
+
print("t2_PSNR:", round(np.array(t2_PSNR_all).mean(), 4))
|
| 582 |
+
print("t2_NMSE:", round(np.array(t2_MSE_all).mean(), 4))
|
| 583 |
+
print("t2_SSIM:", round(np.array(t2_SSIM_all).mean(), 4))
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/math.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Copyright (c) Facebook, Inc. and its affiliates.
|
| 3 |
+
|
| 4 |
+
This source code is licensed under the MIT license found in the
|
| 5 |
+
LICENSE file in the root directory of this source tree.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
def complex_mul(x, y):
|
| 12 |
+
"""
|
| 13 |
+
Complex multiplication.
|
| 14 |
+
|
| 15 |
+
This multiplies two complex tensors assuming that they are both stored as
|
| 16 |
+
real arrays with the last dimension being the complex dimension.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
x (torch.Tensor): A PyTorch tensor with the last dimension of size 2.
|
| 20 |
+
y (torch.Tensor): A PyTorch tensor with the last dimension of size 2.
|
| 21 |
+
|
| 22 |
+
Returns:
|
| 23 |
+
torch.Tensor: A PyTorch tensor with the last dimension of size 2.
|
| 24 |
+
"""
|
| 25 |
+
assert x.shape[-1] == y.shape[-1] == 2
|
| 26 |
+
re = x[..., 0] * y[..., 0] - x[..., 1] * y[..., 1]
|
| 27 |
+
im = x[..., 0] * y[..., 1] + x[..., 1] * y[..., 0]
|
| 28 |
+
|
| 29 |
+
return torch.stack((re, im), dim=-1)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def complex_conj(x):
|
| 33 |
+
"""
|
| 34 |
+
Complex conjugate.
|
| 35 |
+
|
| 36 |
+
This applies the complex conjugate assuming that the input array has the
|
| 37 |
+
last dimension as the complex dimension.
|
| 38 |
+
|
| 39 |
+
Args:
|
| 40 |
+
x (torch.Tensor): A PyTorch tensor with the last dimension of size 2.
|
| 41 |
+
y (torch.Tensor): A PyTorch tensor with the last dimension of size 2.
|
| 42 |
+
|
| 43 |
+
Returns:
|
| 44 |
+
torch.Tensor: A PyTorch tensor with the last dimension of size 2.
|
| 45 |
+
"""
|
| 46 |
+
assert x.shape[-1] == 2
|
| 47 |
+
|
| 48 |
+
return torch.stack((x[..., 0], -x[..., 1]), dim=-1)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def fft2c(data: torch.Tensor, norm: str = "ortho") -> torch.Tensor:
|
| 55 |
+
"""
|
| 56 |
+
Apply centered 2 dimensional Fast Fourier Transform.
|
| 57 |
+
|
| 58 |
+
Args:
|
| 59 |
+
data: Complex valued input data containing at least 3 dimensions:
|
| 60 |
+
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
|
| 61 |
+
2. All other dimensions are assumed to be batch dimensions.
|
| 62 |
+
norm: Normalization mode. See ``torch.fft.fft``.
|
| 63 |
+
|
| 64 |
+
Returns:
|
| 65 |
+
The FFT of the input.
|
| 66 |
+
"""
|
| 67 |
+
if not data.shape[-1] == 2:
|
| 68 |
+
raise ValueError("Tensor does not have separate complex dim.")
|
| 69 |
+
|
| 70 |
+
data = ifftshift(data, dim=[-3, -2])
|
| 71 |
+
data = torch.view_as_real(
|
| 72 |
+
torch.fft.fftn( # type: ignore
|
| 73 |
+
torch.view_as_complex(data), dim=(-2, -1), norm=norm
|
| 74 |
+
)
|
| 75 |
+
)
|
| 76 |
+
data = fftshift(data, dim=[-3, -2])
|
| 77 |
+
|
| 78 |
+
return data
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def ifft2c(data: torch.Tensor, norm: str = "ortho") -> torch.Tensor:
|
| 82 |
+
"""
|
| 83 |
+
Apply centered 2-dimensional Inverse Fast Fourier Transform.
|
| 84 |
+
|
| 85 |
+
Args:
|
| 86 |
+
data: Complex valued input data containing at least 3 dimensions:
|
| 87 |
+
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
|
| 88 |
+
2. All other dimensions are assumed to be batch dimensions.
|
| 89 |
+
norm: Normalization mode. See ``torch.fft.ifft``.
|
| 90 |
+
|
| 91 |
+
Returns:
|
| 92 |
+
The IFFT of the input.
|
| 93 |
+
"""
|
| 94 |
+
if not data.shape[-1] == 2:
|
| 95 |
+
raise ValueError("Tensor does not have separate complex dim.")
|
| 96 |
+
|
| 97 |
+
data = ifftshift(data, dim=[-3, -2])
|
| 98 |
+
data = torch.view_as_real(
|
| 99 |
+
torch.fft.ifftn( # type: ignore
|
| 100 |
+
torch.view_as_complex(data), dim=(-2, -1), norm=norm
|
| 101 |
+
)
|
| 102 |
+
)
|
| 103 |
+
data = fftshift(data, dim=[-3, -2])
|
| 104 |
+
|
| 105 |
+
return data
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def complex_abs(data):
|
| 112 |
+
"""
|
| 113 |
+
Compute the absolute value of a complex valued input tensor.
|
| 114 |
+
|
| 115 |
+
Args:
|
| 116 |
+
data (torch.Tensor): A complex valued tensor, where the size of the
|
| 117 |
+
final dimension should be 2.
|
| 118 |
+
|
| 119 |
+
Returns:
|
| 120 |
+
torch.Tensor: Absolute value of data.
|
| 121 |
+
"""
|
| 122 |
+
assert data.size(-1) == 2
|
| 123 |
+
|
| 124 |
+
return (data ** 2).sum(dim=-1).sqrt()
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def complex_abs_numpy(data):
|
| 129 |
+
assert data.shape[-1] == 2
|
| 130 |
+
|
| 131 |
+
return np.sqrt(np.sum(data ** 2, axis=-1))
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def complex_abs_sq(data):#multi coil
|
| 135 |
+
"""
|
| 136 |
+
Compute the squared absolute value of a complex tensor.
|
| 137 |
+
|
| 138 |
+
Args:
|
| 139 |
+
data (torch.Tensor): A complex valued tensor, where the size of the
|
| 140 |
+
final dimension should be 2.
|
| 141 |
+
|
| 142 |
+
Returns:
|
| 143 |
+
torch.Tensor: Squared absolute value of data.
|
| 144 |
+
"""
|
| 145 |
+
assert data.size(-1) == 2
|
| 146 |
+
return (data ** 2).sum(dim=-1)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# Helper functions
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def roll(x, shift, dim):
|
| 153 |
+
"""
|
| 154 |
+
Similar to np.roll but applies to PyTorch Tensors.
|
| 155 |
+
|
| 156 |
+
Args:
|
| 157 |
+
x (torch.Tensor): A PyTorch tensor.
|
| 158 |
+
shift (int): Amount to roll.
|
| 159 |
+
dim (int): Which dimension to roll.
|
| 160 |
+
|
| 161 |
+
Returns:
|
| 162 |
+
torch.Tensor: Rolled version of x.
|
| 163 |
+
"""
|
| 164 |
+
if isinstance(shift, (tuple, list)):
|
| 165 |
+
assert len(shift) == len(dim)
|
| 166 |
+
for s, d in zip(shift, dim):
|
| 167 |
+
x = roll(x, s, d)
|
| 168 |
+
return x
|
| 169 |
+
shift = shift % x.size(dim)
|
| 170 |
+
if shift == 0:
|
| 171 |
+
return x
|
| 172 |
+
left = x.narrow(dim, 0, x.size(dim) - shift)
|
| 173 |
+
right = x.narrow(dim, x.size(dim) - shift, shift)
|
| 174 |
+
return torch.cat((right, left), dim=dim)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def fftshift(x, dim=None):
|
| 178 |
+
"""
|
| 179 |
+
Similar to np.fft.fftshift but applies to PyTorch Tensors
|
| 180 |
+
|
| 181 |
+
Args:
|
| 182 |
+
x (torch.Tensor): A PyTorch tensor.
|
| 183 |
+
dim (int): Which dimension to fftshift.
|
| 184 |
+
|
| 185 |
+
Returns:
|
| 186 |
+
torch.Tensor: fftshifted version of x.
|
| 187 |
+
"""
|
| 188 |
+
if dim is None:
|
| 189 |
+
dim = tuple(range(x.dim()))
|
| 190 |
+
shift = [dim // 2 for dim in x.shape]
|
| 191 |
+
elif isinstance(dim, int):
|
| 192 |
+
shift = x.shape[dim] // 2
|
| 193 |
+
else:
|
| 194 |
+
shift = [x.shape[i] // 2 for i in dim]
|
| 195 |
+
|
| 196 |
+
return roll(x, shift, dim)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def ifftshift(x, dim=None):
|
| 200 |
+
"""
|
| 201 |
+
Similar to np.fft.ifftshift but applies to PyTorch Tensors
|
| 202 |
+
|
| 203 |
+
Args:
|
| 204 |
+
x (torch.Tensor): A PyTorch tensor.
|
| 205 |
+
dim (int): Which dimension to ifftshift.
|
| 206 |
+
|
| 207 |
+
Returns:
|
| 208 |
+
torch.Tensor: ifftshifted version of x.
|
| 209 |
+
"""
|
| 210 |
+
if dim is None:
|
| 211 |
+
dim = tuple(range(x.dim()))
|
| 212 |
+
shift = [(dim + 1) // 2 for dim in x.shape]
|
| 213 |
+
elif isinstance(dim, int):
|
| 214 |
+
shift = (x.shape[dim] + 1) // 2
|
| 215 |
+
else:
|
| 216 |
+
shift = [(x.shape[i] + 1) // 2 for i in dim]
|
| 217 |
+
|
| 218 |
+
return roll(x, shift, dim)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def tensor_to_complex_np(data):
|
| 222 |
+
"""
|
| 223 |
+
Converts a complex torch tensor to numpy array.
|
| 224 |
+
Args:
|
| 225 |
+
data (torch.Tensor): Input data to be converted to numpy.
|
| 226 |
+
|
| 227 |
+
Returns:
|
| 228 |
+
np.array: Complex numpy version of data
|
| 229 |
+
"""
|
| 230 |
+
data = data.numpy()
|
| 231 |
+
return data[..., 0] + 1j * data[..., 1]
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/subsample.py
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Copyright (c) Facebook, Inc. and its affiliates.
|
| 3 |
+
|
| 4 |
+
This source code is licensed under the MIT license found in the
|
| 5 |
+
LICENSE file in the root directory of this source tree.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import contextlib
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@contextlib.contextmanager
|
| 15 |
+
def temp_seed(rng, seed):
|
| 16 |
+
state = rng.get_state()
|
| 17 |
+
rng.seed(seed)
|
| 18 |
+
try:
|
| 19 |
+
yield
|
| 20 |
+
finally:
|
| 21 |
+
rng.set_state(state)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def create_mask_for_mask_type(mask_type_str, center_fractions, accelerations):
|
| 25 |
+
if mask_type_str == "random":
|
| 26 |
+
return RandomMaskFunc(center_fractions, accelerations)
|
| 27 |
+
elif mask_type_str == "equispaced":
|
| 28 |
+
return EquispacedMaskFunc(center_fractions, accelerations)
|
| 29 |
+
else:
|
| 30 |
+
raise Exception(f"{mask_type_str} not supported")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class MaskFunc(object):
|
| 34 |
+
"""
|
| 35 |
+
An object for GRAPPA-style sampling masks.
|
| 36 |
+
|
| 37 |
+
This crates a sampling mask that densely samples the center while
|
| 38 |
+
subsampling outer k-space regions based on the undersampling factor.
|
| 39 |
+
"""
|
| 40 |
+
|
| 41 |
+
def __init__(self, center_fractions, accelerations):
|
| 42 |
+
"""
|
| 43 |
+
Args:
|
| 44 |
+
center_fractions (List[float]): Fraction of low-frequency columns to be
|
| 45 |
+
retained. If multiple values are provided, then one of these
|
| 46 |
+
numbers is chosen uniformly each time.
|
| 47 |
+
accelerations (List[int]): Amount of under-sampling. This should have
|
| 48 |
+
the same length as center_fractions. If multiple values are
|
| 49 |
+
provided, then one of these is chosen uniformly each time.
|
| 50 |
+
"""
|
| 51 |
+
if len(center_fractions) != len(accelerations):
|
| 52 |
+
raise ValueError(
|
| 53 |
+
"Number of center fractions should match number of accelerations"
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
self.center_fractions = center_fractions
|
| 57 |
+
self.accelerations = accelerations
|
| 58 |
+
self.rng = np.random
|
| 59 |
+
|
| 60 |
+
def choose_acceleration(self):
|
| 61 |
+
"""Choose acceleration based on class parameters."""
|
| 62 |
+
choice = self.rng.randint(0, len(self.accelerations))
|
| 63 |
+
center_fraction = self.center_fractions[choice]
|
| 64 |
+
acceleration = self.accelerations[choice]
|
| 65 |
+
|
| 66 |
+
return center_fraction, acceleration
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class RandomMaskFunc(MaskFunc):
|
| 70 |
+
"""
|
| 71 |
+
RandomMaskFunc creates a sub-sampling mask of a given shape.
|
| 72 |
+
|
| 73 |
+
The mask selects a subset of columns from the input k-space data. If the
|
| 74 |
+
k-space data has N columns, the mask picks out:
|
| 75 |
+
1. N_low_freqs = (N * center_fraction) columns in the center
|
| 76 |
+
corresponding to low-frequencies.
|
| 77 |
+
2. The other columns are selected uniformly at random with a
|
| 78 |
+
probability equal to: prob = (N / acceleration - N_low_freqs) /
|
| 79 |
+
(N - N_low_freqs). This ensures that the expected number of columns
|
| 80 |
+
selected is equal to (N / acceleration).
|
| 81 |
+
|
| 82 |
+
It is possible to use multiple center_fractions and accelerations, in which
|
| 83 |
+
case one possible (center_fraction, acceleration) is chosen uniformly at
|
| 84 |
+
random each time the RandomMaskFunc object is called.
|
| 85 |
+
|
| 86 |
+
For example, if accelerations = [4, 8] and center_fractions = [0.08, 0.04],
|
| 87 |
+
then there is a 50% probability that 4-fold acceleration with 8% center
|
| 88 |
+
fraction is selected and a 50% probability that 8-fold acceleration with 4%
|
| 89 |
+
center fraction is selected.
|
| 90 |
+
"""
|
| 91 |
+
|
| 92 |
+
def __call__(self, shape, seed=None):
|
| 93 |
+
"""
|
| 94 |
+
Create the mask.
|
| 95 |
+
|
| 96 |
+
Args:
|
| 97 |
+
shape (iterable[int]): The shape of the mask to be created. The
|
| 98 |
+
shape should have at least 3 dimensions. Samples are drawn
|
| 99 |
+
along the second last dimension.
|
| 100 |
+
seed (int, optional): Seed for the random number generator. Setting
|
| 101 |
+
the seed ensures the same mask is generated each time for the
|
| 102 |
+
same shape. The random state is reset afterwards.
|
| 103 |
+
|
| 104 |
+
Returns:
|
| 105 |
+
torch.Tensor: A mask of the specified shape.
|
| 106 |
+
"""
|
| 107 |
+
if len(shape) < 3:
|
| 108 |
+
raise ValueError("Shape should have 3 or more dimensions")
|
| 109 |
+
|
| 110 |
+
with temp_seed(self.rng, seed):
|
| 111 |
+
num_cols = shape[-2]
|
| 112 |
+
center_fraction, acceleration = self.choose_acceleration()
|
| 113 |
+
|
| 114 |
+
# create the mask
|
| 115 |
+
num_low_freqs = int(round(num_cols * center_fraction))
|
| 116 |
+
prob = (num_cols / acceleration - num_low_freqs) / (
|
| 117 |
+
num_cols - num_low_freqs
|
| 118 |
+
)
|
| 119 |
+
mask = self.rng.uniform(size=num_cols) < prob
|
| 120 |
+
pad = (num_cols - num_low_freqs + 1) // 2
|
| 121 |
+
mask[pad : pad + num_low_freqs] = True
|
| 122 |
+
|
| 123 |
+
# reshape the mask
|
| 124 |
+
mask_shape = [1 for _ in shape]
|
| 125 |
+
mask_shape[-2] = num_cols
|
| 126 |
+
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
|
| 127 |
+
|
| 128 |
+
return mask
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class EquispacedMaskFunc(MaskFunc):
|
| 132 |
+
"""
|
| 133 |
+
EquispacedMaskFunc creates a sub-sampling mask of a given shape.
|
| 134 |
+
|
| 135 |
+
The mask selects a subset of columns from the input k-space data. If the
|
| 136 |
+
k-space data has N columns, the mask picks out:
|
| 137 |
+
1. N_low_freqs = (N * center_fraction) columns in the center
|
| 138 |
+
corresponding tovlow-frequencies.
|
| 139 |
+
2. The other columns are selected with equal spacing at a proportion
|
| 140 |
+
that reaches the desired acceleration rate taking into consideration
|
| 141 |
+
the number of low frequencies. This ensures that the expected number
|
| 142 |
+
of columns selected is equal to (N / acceleration)
|
| 143 |
+
|
| 144 |
+
It is possible to use multiple center_fractions and accelerations, in which
|
| 145 |
+
case one possible (center_fraction, acceleration) is chosen uniformly at
|
| 146 |
+
random each time the EquispacedMaskFunc object is called.
|
| 147 |
+
|
| 148 |
+
Note that this function may not give equispaced samples (documented in
|
| 149 |
+
https://github.com/facebookresearch/fastMRI/issues/54), which will require
|
| 150 |
+
modifications to standard GRAPPA approaches. Nonetheless, this aspect of
|
| 151 |
+
the function has been preserved to match the public multicoil data.
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
def __call__(self, shape, seed):
|
| 155 |
+
"""
|
| 156 |
+
Args:
|
| 157 |
+
shape (iterable[int]): The shape of the mask to be created. The
|
| 158 |
+
shape should have at least 3 dimensions. Samples are drawn
|
| 159 |
+
along the second last dimension.
|
| 160 |
+
seed (int, optional): Seed for the random number generator. Setting
|
| 161 |
+
the seed ensures the same mask is generated each time for the
|
| 162 |
+
same shape. The random state is reset afterwards.
|
| 163 |
+
|
| 164 |
+
Returns:
|
| 165 |
+
torch.Tensor: A mask of the specified shape.
|
| 166 |
+
"""
|
| 167 |
+
if len(shape) < 3:
|
| 168 |
+
raise ValueError("Shape should have 3 or more dimensions")
|
| 169 |
+
|
| 170 |
+
with temp_seed(self.rng, seed):
|
| 171 |
+
center_fraction, acceleration = self.choose_acceleration()
|
| 172 |
+
num_cols = shape[-2]
|
| 173 |
+
num_low_freqs = int(round(num_cols * center_fraction))
|
| 174 |
+
|
| 175 |
+
# create the mask
|
| 176 |
+
mask = np.zeros(num_cols, dtype=np.float32)
|
| 177 |
+
pad = (num_cols - num_low_freqs + 1) // 2
|
| 178 |
+
mask[pad : pad + num_low_freqs] = True
|
| 179 |
+
|
| 180 |
+
# determine acceleration rate by adjusting for the number of low frequencies
|
| 181 |
+
adjusted_accel = (acceleration * (num_low_freqs - num_cols)) / (
|
| 182 |
+
num_low_freqs * acceleration - num_cols
|
| 183 |
+
)
|
| 184 |
+
offset = self.rng.randint(0, round(adjusted_accel))
|
| 185 |
+
|
| 186 |
+
accel_samples = np.arange(offset, num_cols - 1, adjusted_accel)
|
| 187 |
+
accel_samples = np.around(accel_samples).astype(np.uint)
|
| 188 |
+
mask[accel_samples] = True
|
| 189 |
+
|
| 190 |
+
# reshape the mask
|
| 191 |
+
mask_shape = [1 for _ in shape]
|
| 192 |
+
mask_shape[-2] = num_cols
|
| 193 |
+
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
|
| 194 |
+
|
| 195 |
+
return mask
|
MRI_recon/code/Frequency-Diffusion/FSMNet/dataloaders/transforms.py
ADDED
|
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Copyright (c) Facebook, Inc. and its affiliates.
|
| 3 |
+
|
| 4 |
+
This source code is licensed under the MIT license found in the
|
| 5 |
+
LICENSE file in the root directory of this source tree.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from .math import ifft2c, fft2c, complex_abs
|
| 12 |
+
from .subsample import create_mask_for_mask_type, MaskFunc
|
| 13 |
+
import random
|
| 14 |
+
|
| 15 |
+
from typing import Dict, Optional, Sequence, Tuple, Union
|
| 16 |
+
from matplotlib import pyplot as plt
|
| 17 |
+
import os
|
| 18 |
+
|
| 19 |
+
def rss(data, dim=0):
    """
    Compute the Root-Sum-of-Squares (RSS) along one dimension.

    RSS is conventionally applied over the coil dimension of multi-coil data.

    Args:
        data (torch.Tensor): The input tensor.
        dim (int): The dimension along which to combine.

    Returns:
        torch.Tensor: The RSS value.
    """
    squared_sum = (data ** 2).sum(dim)
    return squared_sum.sqrt()
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def to_tensor(data):
    """
    Convert a numpy array to a PyTorch tensor.

    Complex arrays become real tensors with the real and imaginary parts
    stacked along a new trailing dimension.

    Args:
        data (np.array): Input numpy array.

    Returns:
        torch.Tensor: PyTorch version of data.
    """
    if not np.iscomplexobj(data):
        return torch.from_numpy(data)
    # Split complex values into a trailing (real, imag) dimension.
    parts = np.stack((data.real, data.imag), axis=-1)
    return torch.from_numpy(parts)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def tensor_to_complex_np(data):
    """
    Convert a (..., 2) real torch tensor to a complex numpy array.

    Args:
        data (torch.Tensor): Input data whose last dimension holds the
            (real, imaginary) parts.

    Returns:
        np.array: Complex numpy version of data.
    """
    arr = data.numpy()
    real, imag = arr[..., 0], arr[..., 1]
    return real + 1j * imag
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def apply_mask(data, mask_func, seed=None, padding=None):
    """
    Subsample given k-space by multiplying with a mask.

    Args:
        data (torch.Tensor): Input k-space with at least 3 dimensions;
            dimensions -3 and -2 are spatial and the final dimension has
            size 2 (complex values).
        mask_func (callable): Called with (shape, seed); returns a mask.
        seed (int or 1-d array_like, optional): Seed for the random number
            generator.
        padding (sequence, optional): [left, right] column range outside of
            which the mask is zeroed.

    Returns:
        (tuple): tuple containing:
            masked data (torch.Tensor): Subsampled k-space data
            mask (torch.Tensor): The generated mask
    """
    # Collapse every leading dimension so the mask broadcasts over them.
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1
    mask = mask_func(mask_shape, seed)

    if padding is not None:
        mask[:, :, : padding[0]] = 0
        mask[:, :, padding[1] :] = 0  # padding value inclusive on right of zeros

    # Adding 0.0 removes the sign of zeros produced by the multiplication.
    return data * mask + 0.0, mask
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def mask_center(x, mask_from, mask_to):
    """Zero out everything outside columns [mask_from, mask_to) of a 4-D tensor."""
    kept = x[:, :, :, mask_from:mask_to]
    out = torch.zeros_like(x)
    out[:, :, :, mask_from:mask_to] = kept
    return out
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def center_crop(data, shape):
    """
    Apply a center crop to a real image or batch of real images.

    Args:
        data (torch.Tensor): Tensor with at least 2 dimensions; the crop is
            applied along the last two.
        shape (int, int): Output (height, width); each entry must be positive
            and no larger than the corresponding dimension of data.

    Returns:
        torch.Tensor: The center cropped image.
    """
    assert 0 < shape[0] <= data.shape[-2]
    assert 0 < shape[1] <= data.shape[-1]

    top = (data.shape[-2] - shape[0]) // 2
    left = (data.shape[-1] - shape[1]) // 2
    return data[..., top : top + shape[0], left : left + shape[1]]
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def complex_center_crop(data, shape):
    """
    Apply a center crop to a complex image or batch of complex images.

    Args:
        data (torch.Tensor): Tensor with at least 3 dimensions whose last
            dimension has size 2 (real/imaginary); the crop is applied along
            dimensions -3 and -2.
        shape (int, int): Output (height, width); each entry must be positive
            and no larger than the corresponding dimension of data.

    Returns:
        torch.Tensor: The center cropped image.
    """
    assert 0 < shape[0] <= data.shape[-3]
    assert 0 < shape[1] <= data.shape[-2]

    top = (data.shape[-3] - shape[0]) // 2
    left = (data.shape[-2] - shape[1]) // 2
    return data[..., top : top + shape[0], left : left + shape[1], :]
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def center_crop_to_smallest(x, y):
    """
    Center-crop the larger image to the size of the smaller.

    The target height is the minimum over dim -2 and the target width the
    minimum over dim -1, so if x is smaller than y at dim -1 and y is smaller
    than x at dim -2 the result is a mixture of the two.

    Args:
        x (torch.Tensor): The first image.
        y (torch.Tensor): The second image.

    Returns:
        tuple: tensors x and y, each cropped to the common minimum size.
    """
    width = min(x.shape[-1], y.shape[-1])
    height = min(x.shape[-2], y.shape[-2])
    assert height > 0 and width > 0

    def _crop(img):
        # Offsets that center the (height, width) window in img.
        top = (img.shape[-2] - height) // 2
        left = (img.shape[-1] - width) // 2
        return img[..., top : top + height, left : left + width]

    return _crop(x), _crop(y)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def normalize(data, mean, stddev, eps=0.0):
    """
    Normalize the given tensor.

    Applies the formula (data - mean) / (stddev + eps).

    Args:
        data (torch.Tensor): Input data to be normalized.
        mean (float): Mean value.
        stddev (float): Standard deviation.
        eps (float, default=0.0): Added to stddev to prevent dividing by zero.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    shifted = data - mean
    return shifted / (stddev + eps)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def normalize_instance(data, eps=0.0):
    """
    Normalize the given tensor with its own statistics (instance norm).

    Applies (data - mean) / (std + eps), where mean and std are computed from
    the data itself; the statistics are returned so a paired tensor (e.g. the
    target) can be normalized consistently.

    Args:
        data (torch.Tensor): Input data to be normalized.
        eps (float): Added to std to prevent dividing by zero.

    Returns:
        tuple: (normalized tensor, mean, std).
    """
    mean = data.mean()
    std = data.std()
    normalized = (data - mean) / (std + eps)
    return normalized, mean, std
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class DataTransform(object):
    """
    Data Transformer for training U-Net models.

    Builds a low-resolution magnitude image by keeping only the central
    160x160 block of k-space, together with the full-resolution target
    normalized with the same statistics.
    """

    def __init__(self, which_challenge):
        """
        Args:
            which_challenge (str): Either "singlecoil" or "multicoil" denoting
                the dataset.

        Raises:
            ValueError: If which_challenge is not one of the two allowed values.
        """
        if which_challenge not in ("singlecoil", "multicoil"):
            raise ValueError(f'Challenge should either be "singlecoil" or "multicoil"')

        self.which_challenge = which_challenge

    def __call__(self, kspace, mask, target, attrs, fname, slice_num):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows,
                cols, 2) for multi-coil data or (rows, cols, 2) for single coil
                data.
            mask (numpy.array): Mask from the test dataset. Not used by this
                transform; kept for interface compatibility.
            target (numpy.array): Target image, or None.
            attrs (dict): Acquisition related information stored in the HDF5
                object.
            fname (str): File name.
            slice_num (int): Serial number of the slice.

        Returns:
            (tuple): tuple containing:
                LR_image (torch.Tensor): Low-resolution magnitude image.
                target (torch.Tensor): Target image converted to a torch
                    Tensor (or a dummy zero tensor when target is None).
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                fname (str): File name.
                slice_num (int): Serial number of the slice.
        """
        kspace = to_tensor(kspace)

        # inverse Fourier transform to get zero filled solution
        image = ifft2c(kspace)

        # crop input to correct size
        if target is not None:
            crop_size = (target.shape[-2], target.shape[-1])
        else:
            crop_size = (attrs["recon_size"][0], attrs["recon_size"][1])

        # check for sFLAIR 203: fall back to a square crop when the image is
        # narrower than the requested width
        if image.shape[-2] < crop_size[1]:
            crop_size = (image.shape[-2], image.shape[-2])

        image = complex_center_crop(image, crop_size)

        # getLR: keep only the central 160x160 block of k-space, then go back
        # to image space to obtain the low-resolution input
        imgfft = fft2c(image)
        imgfft = complex_center_crop(imgfft, (160, 160))
        LR_image = ifft2c(imgfft)

        # absolute value (magnitude image)
        LR_image = complex_abs(LR_image)

        # normalize input with its own statistics
        LR_image, mean, std = normalize_instance(LR_image, eps=1e-11)
        LR_image = LR_image.clamp(-6, 6)

        # normalize target with the SAME statistics so input and target share
        # one scale
        if target is not None:
            target = to_tensor(target)
            target = center_crop(target, crop_size)
            target = normalize(target, mean, std, eps=1e-11)
            target = target.clamp(-6, 6)
        else:
            target = torch.Tensor([0])

        return LR_image, target, mean, std, fname, slice_num
|
| 301 |
+
|
| 302 |
+
class DenoiseDataTransform(object):
    """
    Data transformer for training denoising models.

    Produces a (noisy image, clean target) pair from a k-space slice by
    adding Rician noise in the complex image domain.
    """

    def __init__(self, size, noise_rate):
        """
        Args:
            size (int): Side length of the square center crop.
            noise_rate (float): Scales the upper bound of the sampled noise
                std (relative to the volume's "max" attribute).
        """
        super(DenoiseDataTransform, self).__init__()
        self.size = (size, size)
        self.noise_rate = noise_rate

    def __call__(self, kspace, mask, target, attrs, fname, slice_num):
        """
        Args:
            kspace (numpy.array): Input k-space for a single slice.
            mask (numpy.array): Not used by this transform; kept for
                interface compatibility.
            target (numpy.array): Target image.
            attrs (dict): HDF5 attributes; "max" scales the noise level.
            fname (str): File name.
            slice_num (int): Serial number of the slice.

        Returns:
            tuple: (noise_image, target, mean, std, fname, slice_num).
        """
        max_value = attrs["max"]

        # target: cropped, instance-normalized, clamped
        target = to_tensor(target)
        target = center_crop(target, self.size)
        target, mean, std = normalize_instance(target, eps=1e-11)
        target = target.clamp(-6, 6)

        # image: add Rician noise in the complex image domain, take the
        # magnitude, then normalize with the TARGET's statistics so the pair
        # shares one scale
        kspace = to_tensor(kspace)
        complex_image = ifft2c(kspace)  # complex image, last dim = (re, im)
        image = complex_center_crop(complex_image, self.size)
        noise_image = self.rician_noise(image, max_value)
        noise_image = complex_abs(noise_image)

        noise_image = normalize(noise_image, mean, std, eps=1e-11)
        noise_image = noise_image.clamp(-6, 6)

        return noise_image, target, mean, std, fname, slice_num


    def rician_noise(self, X, noise_std):
        """
        Add Rician noise to X.

        The noise std is sampled uniformly from [0, noise_std * noise_rate].
        Ir is X plus Gaussian noise, Ii is pure Gaussian noise, and the
        pointwise magnitude sqrt(Ir**2 + Ii**2) is returned.
        """
        # Sample the noise level uniformly from [0, noise_std * noise_rate].
        noise_std = random.uniform(0, noise_std*self.noise_rate)
        Ir = X + noise_std * torch.randn(X.shape)
        Ii = noise_std*torch.randn(X.shape)
        In = torch.sqrt(Ir ** 2 + Ii ** 2)
        return In
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def apply_mask(
    data: torch.Tensor,
    mask_func: MaskFunc,
    seed: Optional[Union[int, Tuple[int, ...]]] = None,
    padding: Optional[Sequence[int]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Subsample given k-space by multiplying with a mask.

    NOTE(review): this typed definition shadows the earlier untyped
    ``apply_mask`` in this module; both have identical behavior.

    Args:
        data: The input k-space data. This should have at least 3 dimensions,
            where dimensions -3 and -2 are the spatial dimensions, and the
            final dimension has size 2 (for complex values).
        mask_func: A function that takes a shape (tuple of ints) and a random
            number seed and returns a mask.
        seed: Seed for the random number generator.
        padding: Padding value to apply for mask.

    Returns:
        tuple containing:
            masked data: Subsampled k-space data
            mask: The generated mask
    """
    # Collapse every leading dimension so the mask broadcasts over them.
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1
    mask = mask_func(mask_shape, seed)

    if padding is not None:
        mask[:, :, : padding[0]] = 0
        mask[:, :, padding[1] :] = 0  # padding value inclusive on right of zeros

    # Adding 0.0 removes the sign of zeros produced by the multiplication.
    return data * mask + 0.0, mask
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
class ReconstructionTransform(object):
    """
    Data Transformer for training U-Net reconstruction models.

    Converts raw k-space into a zero-filled magnitude image (optionally after
    retrospective under-sampling with ``mask_func``) plus the target image
    normalized with the same statistics.
    """

    def __init__(self, which_challenge, mask_func=None, use_seed=True):
        """
        Args:
            which_challenge (str): Either "singlecoil" or "multicoil" denoting
                the dataset.
            mask_func (fastmri.data.subsample.MaskFunc, optional): A function
                that can create a mask of appropriate shape. If None, the
                k-space is used as-is.
            use_seed (bool): If true, this class computes a pseudo random
                number generator seed from the filename. This ensures that the
                same mask is used for all the slices of a given volume every
                time.

        Raises:
            ValueError: If which_challenge is not one of the two allowed values.
        """
        if which_challenge not in ("singlecoil", "multicoil"):
            raise ValueError(f'Challenge should either be "singlecoil" or "multicoil"')

        self.mask_func = mask_func
        self.which_challenge = which_challenge
        self.use_seed = use_seed

    def __call__(self, kspace, mask, target, attrs, fname, slice_num):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows,
                cols, 2) for multi-coil data or (rows, cols, 2) for single coil
                data.
            mask (numpy.array): Mask from the test dataset. Ignored unless
                self.mask_func produces a new one.
            target (numpy.array): Target image, or None.
            attrs (dict): Acquisition related information stored in the HDF5
                object.
            fname (str): File name.
            slice_num (int): Serial number of the slice.

        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image.
                target (torch.Tensor): Target image converted to a torch
                    Tensor (or a dummy zero tensor when target is None).
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                fname (str): File name.
                slice_num (int): Serial number of the slice.
        """
        kspace = to_tensor(kspace)

        # apply mask: retrospectively under-sample when a mask function is set
        if self.mask_func:
            # seeding with the filename keeps the mask fixed per volume
            seed = None if not self.use_seed else tuple(map(ord, fname))
            masked_kspace, mask = apply_mask(kspace, self.mask_func, seed)
        else:
            masked_kspace = kspace

        # inverse Fourier transform to get zero filled solution
        image = ifft2c(masked_kspace)

        # crop input to correct size
        if target is not None:
            crop_size = (target.shape[-2], target.shape[-1])
        else:
            crop_size = (attrs["recon_size"][0], attrs["recon_size"][1])

        # check for sFLAIR 203: fall back to a square crop when the image is
        # narrower than the requested width
        if image.shape[-2] < crop_size[1]:
            crop_size = (image.shape[-2], image.shape[-2])

        image = complex_center_crop(image, crop_size)

        # absolute value (magnitude image)
        image = complex_abs(image)

        # apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == "multicoil":
            image = rss(image)

        # normalize input with its own statistics
        image, mean, std = normalize_instance(image, eps=1e-11)
        image = image.clamp(-6, 6)

        # normalize target with the SAME statistics so input and target share
        # one scale
        if target is not None:
            target = to_tensor(target)
            target = center_crop(target, crop_size)
            target = normalize(target, mean, std, eps=1e-11)
            target = target.clamp(-6, 6)
        else:
            target = torch.Tensor([0])

        return image, target, mean, std, fname, slice_num
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
def build_transforms(args, mode='train', use_kspace=False):
    """
    Build the data transform for a given split.

    Args:
        args: Config object providing MASKTYPE, CENTER_FRACTIONS and
            ACCELERATIONS for mask creation.
        mode (str): One of 'train', 'val', or anything else for test.
        use_kspace (bool): If True, return a transform without any mask
            function regardless of mode.

    Returns:
        ReconstructionTransform: The configured transform.
    """
    challenge = 'singlecoil'

    if use_kspace:
        return ReconstructionTransform(challenge)

    if mode == 'train':
        # Random (unseeded) masks during training for augmentation.
        mask = create_mask_for_mask_type(
            args.MASKTYPE, args.CENTER_FRACTIONS, args.ACCELERATIONS,
        )
        return ReconstructionTransform(challenge, mask, use_seed=False)
    if mode == 'val':
        # Seeded masks during validation for reproducibility.
        mask = create_mask_for_mask_type(
            args.MASKTYPE, args.CENTER_FRACTIONS, args.ACCELERATIONS,
        )
        return ReconstructionTransform(challenge, mask)
    # Test mode: no retrospective masking.
    return ReconstructionTransform(challenge)
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
|
MRI_recon/code/Frequency-Diffusion/FSMNet/debug/True_0_0.png
ADDED
|
Git LFS Details
|
MRI_recon/code/Frequency-Diffusion/FSMNet/documents/INSTALL.md
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Dependency
|
| 2 |
+
The code is tested on `Python 3.8, PyTorch 1.13`.
|
| 3 |
+
|
| 4 |
+
##### Setup environment
|
| 5 |
+
|
| 6 |
+
```bash
|
| 7 |
+
conda create -n FSMNet python=3.8
|
| 8 |
+
source activate FSMNet # or conda activate FSMNet
|
| 9 |
+
pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116
|
| 10 |
+
pip install einops h5py matplotlib scikit_image tensorboardX yacs pandas opencv-python timm ml_collections
|
| 11 |
+
```
|
MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .frequency_noise import add_frequency_noise
|
| 2 |
+
from .degradation.k_degradation import get_ksu_kernel, apply_tofre, apply_to_spatial
|
MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (354 Bytes). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/__pycache__/frequency_noise.cpython-310.pyc
ADDED
|
Binary file (973 Bytes). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/__init__.py
ADDED
|
File without changes
|
MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (194 Bytes). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/__pycache__/k_degradation.cpython-310.pyc
ADDED
|
Binary file (9.58 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/__pycache__/mask_utils.cpython-310.pyc
ADDED
|
Binary file (7.55 kB). View file
|
|
|
MRI_recon/code/Frequency-Diffusion/FSMNet/frequency_diffusion/degradation/extract_example_mask.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
import torch
import numpy as np
from torch.fft import fft2, ifft2, fftshift, ifftshift

# Debug script: derive a 1-D amplitude mask from the log-spectrum difference
# between an under-sampled MRI image and its fully-sampled ground truth, and
# save it to a .npy file. Intermediate plt.show() calls are for visual
# inspection only.

# brats 4X
example = "/gamedrive/Datasets/medical/FrequencyDiffusion/brats/image_100patients_4X/BraTS20_Training_036_90_t2_4X_undermri.png"
gt = "/gamedrive/Datasets/medical/FrequencyDiffusion/brats/image_100patients_4X/BraTS20_Training_036_90_t2.png"
save_file = "./example_mask/brats_4X_mask.npy"

# brats 8X -- NOTE(review): these assignments overwrite the 4X paths above,
# so the script as written always processes the 8X example.
example = "/gamedrive/Datasets/medical/FrequencyDiffusion/brats/image_100patients_8X/BraTS20_Training_036_90_t2_8X_undermri.png"
gt = "/gamedrive/Datasets/medical/FrequencyDiffusion/brats/image_100patients_8X/BraTS20_Training_036_90_t2.png"
save_file = "./example_mask/brats_8X_mask.npy"


# Load the under-sampled image and the ground truth as grayscale arrays.
example_img = plt.imread(example)  # cv2.imread(example_frequency_img, cv2.IMREAD_GRAYSCALE)
gt = plt.imread(gt)  # cv2.imread(example_frequency_img, cv2.IMREAD_GRAYSCALE)

print("example_img shape: ", example_img.shape)
plt.imshow(example_img, cmap='gray')
plt.title("Example Frequency Image")
plt.show()

# Centered 2-D spectrum of the under-sampled image: log-amplitude and phase.
example_img = torch.from_numpy(example_img).float()
fre = fftshift(fft2(example_img))
amp = torch.log(torch.abs(fre))
plt.imshow(amp.squeeze(0).squeeze(0).numpy())
plt.show()
angle = torch.angle(fre)
plt.imshow(angle.squeeze(0).squeeze(0).numpy())
plt.show()

# Same for the ground-truth image.
gt_fre = fftshift(fft2(torch.from_numpy(gt).float()))
gt_amp = torch.log(torch.abs(gt_fre))
plt.imshow(gt_amp.squeeze(0).squeeze(0).numpy())
plt.show()
gt_angle = torch.angle(gt_fre)
plt.imshow(gt_angle.squeeze(0).squeeze(0).numpy())
plt.show()

# Log-amplitude difference, averaged over rows to a single 1-D profile.
amp_mask = gt_amp.squeeze(0).squeeze(0).numpy() - amp.squeeze(0).squeeze(0).numpy()
amp_mask = np.mean(amp_mask, axis=0, keepdims=True)

# NOTE(review): the label says "shape" but this prints the full array.
print("amp_mask shape: ", amp_mask)
# Binarize around the mean difference: 1 = below-threshold (kept) columns.
thres = np.mean(amp_mask)
amp_mask[amp_mask < thres] = 1
amp_mask[amp_mask >= thres] = 0


# duplicate: tile the 1-D row profile into a 240-row 2-D mask
amp_mask = np.repeat(amp_mask, 240, axis=0)

plt.imshow(gt_amp.squeeze(0).squeeze(0).numpy() - amp.squeeze(0).squeeze(0).numpy())
plt.show()
plt.imshow(gt_angle.squeeze(0).squeeze(0).numpy() - angle.squeeze(0).squeeze(0).numpy())
plt.show()
plt.imshow(amp_mask)
plt.show()

np.save(save_file, amp_mask)

# Sanity check: reload the saved mask and display it.
load_backmask = np.load(save_file)
plt.imshow(load_backmask)
plt.show()
|
| 71 |
+
|