Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff.
- source_code/SegMamba/mamba/mamba_ssm/ops/selective_scan_interface.py +709 -0
- source_code/SegMamba/monai/handlers/clearml_handlers.py +178 -0
- source_code/SegMamba/monai/handlers/metric_logger.py +137 -0
- source_code/SegMamba/monai/handlers/metrics_reloaded_handler.py +115 -0
- source_code/SegMamba/monai/handlers/nvtx_handlers.py +182 -0
- source_code/SegMamba/monai/handlers/panoptic_quality.py +69 -0
- source_code/SegMamba/monai/handlers/postprocessing.py +73 -0
- source_code/SegMamba/monai/handlers/probability_maps.py +134 -0
- source_code/SegMamba/monai/handlers/regression_metrics.py +154 -0
- source_code/SegMamba/monai/handlers/roc_auc.py +53 -0
- source_code/SegMamba/monai/handlers/smartcache_handler.py +81 -0
- source_code/SegMamba/monai/handlers/stats_handler.py +293 -0
- source_code/SegMamba/monai/handlers/utils.py +219 -0
- source_code/SegMamba/monai/handlers/validation_handler.py +86 -0
- source_code/SegMamba/monai/inferers/merger.py +381 -0
- source_code/SegMamba/monai/inferers/splitter.py +444 -0
- source_code/SegMamba/monai/inferers/utils.py +405 -0
- source_code/SegMamba/monai/losses/cldice.py +184 -0
- source_code/SegMamba/monai/losses/dice.py +1068 -0
- source_code/SegMamba/monai/losses/ds_loss.py +88 -0
- source_code/SegMamba/monai/losses/focal_loss.py +255 -0
- source_code/SegMamba/monai/losses/hausdorff_loss.py +242 -0
- source_code/SegMamba/monai/losses/image_dissimilarity.py +329 -0
- source_code/SegMamba/monai/losses/multi_scale.py +94 -0
- source_code/SegMamba/monai/losses/perceptual.py +437 -0
- source_code/SegMamba/monai/losses/spatial_mask.py +70 -0
- source_code/SegMamba/monai/losses/spectral_loss.py +88 -0
- source_code/SegMamba/monai/losses/ssim_loss.py +134 -0
- source_code/SegMamba/monai/losses/sure_loss.py +200 -0
- source_code/SegMamba/monai/metrics/confusion_matrix.py +322 -0
- source_code/SegMamba/monai/metrics/cumulative_average.py +160 -0
- source_code/SegMamba/monai/metrics/f_beta_score.py +107 -0
- source_code/SegMamba/monai/metrics/fid.py +110 -0
- source_code/SegMamba/monai/metrics/froc.py +175 -0
- source_code/SegMamba/monai/metrics/hausdorff_distance.py +246 -0
- source_code/SegMamba/monai/metrics/loss_metric.py +111 -0
- source_code/SegMamba/monai/metrics/meandice.py +281 -0
- source_code/SegMamba/monai/metrics/mmd.py +91 -0
- source_code/SegMamba/monai/metrics/panoptic_quality.py +296 -0
- source_code/SegMamba/monai/metrics/regression.py +596 -0
- source_code/SegMamba/monai/networks/__init__.py +34 -0
- source_code/sam3/.github/workflows/format.yml +18 -0
- source_code/sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_gt.json +0 -0
- source_code/sam3/sam3/agent/helpers/boxes.py +438 -0
- source_code/sam3/sam3/agent/helpers/color_map.py +150 -0
- source_code/sam3/sam3/agent/helpers/keypoints.py +244 -0
- source_code/sam3/sam3/agent/helpers/mask_overlap_removal.py +128 -0
- source_code/sam3/sam3/agent/helpers/masks.py +560 -0
- source_code/sam3/sam3/agent/helpers/rle.py +122 -0
- source_code/sam3/sam3/agent/helpers/roi_align.py +75 -0
source_code/SegMamba/mamba/mamba_ssm/ops/selective_scan_interface.py
ADDED
|
@@ -0,0 +1,709 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2023, Tri Dao, Albert Gu.
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
from torch.cuda.amp import custom_bwd, custom_fwd
|
| 6 |
+
|
| 7 |
+
from einops import rearrange, repeat
|
| 8 |
+
|
| 9 |
+
from causal_conv1d import causal_conv1d_fn
|
| 10 |
+
import causal_conv1d_cuda
|
| 11 |
+
import selective_scan_cuda
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class SelectiveScanFn(torch.autograd.Function):
    """Autograd wrapper around the fused CUDA selective-scan kernel.

    Forward and backward both dispatch to ``selective_scan_cuda``. Inputs are
    made contiguous in their last (sequence) dimension first because the
    kernel requires unit stride there.
    """

    @staticmethod
    def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False,
                return_last_state=False):
        # The CUDA kernel expects the last dimension of each tensor to be contiguous.
        if u.stride(-1) != 1:
            u = u.contiguous()
        if delta.stride(-1) != 1:
            delta = delta.contiguous()
        if D is not None:
            D = D.contiguous()
        if B.stride(-1) != 1:
            B = B.contiguous()
        if C.stride(-1) != 1:
            C = C.contiguous()
        if z is not None and z.stride(-1) != 1:
            z = z.contiguous()
        # 3-D (ungrouped) B/C get a singleton group dimension for the kernel;
        # remember this so backward can squeeze the corresponding gradients back.
        if B.dim() == 3:
            B = rearrange(B, "b dstate l -> b 1 dstate l")
            ctx.squeeze_B = True
        if C.dim() == 3:
            C = rearrange(C, "b dstate l -> b 1 dstate l")
            ctx.squeeze_C = True
        out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus)
        ctx.delta_softplus = delta_softplus
        ctx.has_z = z is not None
        # NOTE(review): x holds the kernel's scan intermediates; the layout of
        # the final state within it is defined by the CUDA kernel.
        last_state = x[:, :, -1, 1::2]  # (batch, dim, dstate)
        if not ctx.has_z:
            ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x)
            return out if not return_last_state else (out, last_state)
        else:
            # With a gate z the kernel also produces the gated output out_z.
            ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out)
            out_z = rest[0]
            return out_z if not return_last_state else (out_z, last_state)

    @staticmethod
    def backward(ctx, dout, *args):
        # *args absorbs d(last_state) when return_last_state was True; the
        # gradient of the last state is intentionally not propagated.
        if not ctx.has_z:
            u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors
            z = None
            out = None
        else:
            u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors
        if dout.stride(-1) != 1:
            dout = dout.contiguous()
        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
        # backward of selective_scan_cuda with the backward of chunk).
        # Here we just pass in None and dz will be allocated in the C++ code.
        du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd(
            u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus,
            False  # option to recompute out_z, not used here
        )
        dz = rest[0] if ctx.has_z else None
        # Undo the singleton group dimension added in forward, if any.
        dB = dB.squeeze(1) if getattr(ctx, "squeeze_B", False) else dB
        dC = dC.squeeze(1) if getattr(ctx, "squeeze_C", False) else dC
        # One gradient per forward input; None for the two non-tensor flags
        # (delta_softplus, return_last_state).
        return (du, ddelta, dA, dB, dC,
                dD if D is not None else None,
                dz,
                ddelta_bias if delta_bias is not None else None,
                None,
                None)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def selective_scan_fn(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False,
                      return_last_state=False):
    """Run the fused CUDA selective scan via :class:`SelectiveScanFn`.

    if return_last_state is True, returns (out, last_state)
    last_state has shape (batch, dim, dstate). Note that the gradient of the last state is
    not considered in the backward pass.
    """
    result = SelectiveScanFn.apply(
        u, delta, A, B, C, D, z, delta_bias, delta_softplus, return_last_state
    )
    return result
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def selective_scan_ref(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False,
|
| 87 |
+
return_last_state=False):
|
| 88 |
+
"""
|
| 89 |
+
u: r(B D L)
|
| 90 |
+
delta: r(B D L)
|
| 91 |
+
A: c(D N) or r(D N)
|
| 92 |
+
B: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L)
|
| 93 |
+
C: c(D N) or r(B N L) or r(B N 2L) or r(B G N L) or (B G N L)
|
| 94 |
+
D: r(D)
|
| 95 |
+
z: r(B D L)
|
| 96 |
+
delta_bias: r(D), fp32
|
| 97 |
+
|
| 98 |
+
out: r(B D L)
|
| 99 |
+
last_state (optional): r(B D dstate) or c(B D dstate)
|
| 100 |
+
"""
|
| 101 |
+
dtype_in = u.dtype
|
| 102 |
+
u = u.float()
|
| 103 |
+
delta = delta.float()
|
| 104 |
+
if delta_bias is not None:
|
| 105 |
+
delta = delta + delta_bias[..., None].float()
|
| 106 |
+
if delta_softplus:
|
| 107 |
+
delta = F.softplus(delta)
|
| 108 |
+
batch, dim, dstate = u.shape[0], A.shape[0], A.shape[1]
|
| 109 |
+
is_variable_B = B.dim() >= 3
|
| 110 |
+
is_variable_C = C.dim() >= 3
|
| 111 |
+
if A.is_complex():
|
| 112 |
+
if is_variable_B:
|
| 113 |
+
B = torch.view_as_complex(rearrange(B.float(), "... (L two) -> ... L two", two=2))
|
| 114 |
+
if is_variable_C:
|
| 115 |
+
C = torch.view_as_complex(rearrange(C.float(), "... (L two) -> ... L two", two=2))
|
| 116 |
+
else:
|
| 117 |
+
B = B.float()
|
| 118 |
+
C = C.float()
|
| 119 |
+
x = A.new_zeros((batch, dim, dstate))
|
| 120 |
+
ys = []
|
| 121 |
+
deltaA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A))
|
| 122 |
+
if not is_variable_B:
|
| 123 |
+
deltaB_u = torch.einsum('bdl,dn,bdl->bdln', delta, B, u)
|
| 124 |
+
else:
|
| 125 |
+
if B.dim() == 3:
|
| 126 |
+
deltaB_u = torch.einsum('bdl,bnl,bdl->bdln', delta, B, u)
|
| 127 |
+
else:
|
| 128 |
+
B = repeat(B, "B G N L -> B (G H) N L", H=dim // B.shape[1])
|
| 129 |
+
deltaB_u = torch.einsum('bdl,bdnl,bdl->bdln', delta, B, u)
|
| 130 |
+
if is_variable_C and C.dim() == 4:
|
| 131 |
+
C = repeat(C, "B G N L -> B (G H) N L", H=dim // C.shape[1])
|
| 132 |
+
last_state = None
|
| 133 |
+
for i in range(u.shape[2]):
|
| 134 |
+
x = deltaA[:, :, i] * x + deltaB_u[:, :, i]
|
| 135 |
+
if not is_variable_C:
|
| 136 |
+
y = torch.einsum('bdn,dn->bd', x, C)
|
| 137 |
+
else:
|
| 138 |
+
if C.dim() == 3:
|
| 139 |
+
y = torch.einsum('bdn,bn->bd', x, C[:, :, i])
|
| 140 |
+
else:
|
| 141 |
+
y = torch.einsum('bdn,bdn->bd', x, C[:, :, :, i])
|
| 142 |
+
if i == u.shape[2] - 1:
|
| 143 |
+
last_state = x
|
| 144 |
+
if y.is_complex():
|
| 145 |
+
y = y.real * 2
|
| 146 |
+
ys.append(y)
|
| 147 |
+
y = torch.stack(ys, dim=2) # (batch dim L)
|
| 148 |
+
out = y if D is None else y + u * rearrange(D, "d -> d 1")
|
| 149 |
+
if z is not None:
|
| 150 |
+
out = out * F.silu(z)
|
| 151 |
+
out = out.to(dtype=dtype_in)
|
| 152 |
+
return out if not return_last_state else (out, last_state)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class MambaInnerFnNoOutProj(torch.autograd.Function):
    """Fused Mamba inner block (causal conv1d -> input-dependent SSM scan)
    without the final output projection.

    Forward returns the scan output directly in (batch, dim, seqlen) layout.
    Dispatches to the ``causal_conv1d_cuda`` and ``selective_scan_cuda``
    extensions; tensor layouts below are chosen to match those kernels.
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
        """
        xz: (batch, dim, seqlen)
        """
        # checkpoint_lvl == 1 drops conv1d_out/delta here and recomputes them
        # in backward to save activation memory.
        assert checkpoint_lvl in [0, 1]
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        # Complex A stores d_state as interleaved real/imag pairs.
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        if torch.is_autocast_enabled():
            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
        if xz.stride(-1) != 1:
            xz = xz.contiguous()
        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
        x, z = xz.chunk(2, dim=1)
        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
        # We're being very careful here about the layout, to avoid extra transposes.
        # We want delta to have d as the slowest moving dimension
        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
        # B/C being None means they are input-dependent and sliced out of x_dbl.
        ctx.is_variable_B = B is None
        ctx.is_variable_C = C is None
        ctx.B_proj_bias_is_None = B_proj_bias is None
        ctx.C_proj_bias_is_None = C_proj_bias is None
        if B is None:  # variable B
            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
            if B_proj_bias is not None:
                B = B + B_proj_bias.to(dtype=B.dtype)
            if not A.is_complex():
                # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if B.stride(-1) != 1:
                B = B.contiguous()
        if C is None:  # variable C
            C = x_dbl[:, -d_state:]  # (bl dstate)
            if C_proj_bias is not None:
                C = C + C_proj_bias.to(dtype=C.dtype)
            if not A.is_complex():
                # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if C.stride(-1) != 1:
                C = C.contiguous()
        if D is not None:
            D = D.contiguous()
        out, scan_intermediates, out_z = selective_scan_cuda.fwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
        )
        ctx.delta_softplus = delta_softplus
        ctx.checkpoint_lvl = checkpoint_lvl
        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
            conv1d_out, delta = None, None
        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
                              delta_proj_weight, conv1d_out, delta,
                              A, B, C, D, delta_bias, scan_intermediates, out)
        # return rearrange(out_z, "b d l -> b l d")
        return out_z

    @staticmethod
    @custom_bwd
    def backward(ctx, dout):
        # dout: (batch, dim, seqlen) — forward returns out_z without a final
        # rearrange, so the gradient arrives in "b d l" layout (see note below).
        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight,
         conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        x, z = xz.chunk(2, dim=1)
        if dout.stride(-1) != 1:
            dout = dout.contiguous()
        if ctx.checkpoint_lvl == 1:
            # Recompute the activations dropped in forward.
            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
                              "d (b l) -> b d l", l = L)
        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
        # backward of selective_scan_cuda with the backward of chunk).
        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
        dx, dz = dxz.chunk(2, dim=1)
        # dout_y = rearrange(dout, "b l d -> b d l") # because no arrange at end of forward, so dout shape is b d l
        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, dout, scan_intermediates, out, dz,
            ctx.delta_softplus,
            True  # option to recompute out_z
        )
        dD = dD if D is not None else None
        dx_dbl = torch.empty_like(x_dbl)
        dB_proj_bias = None
        if ctx.is_variable_B:
            # Fold dB back into the x_proj gradient slice it came from.
            if not A.is_complex():
                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
            dB = None
        dC_proj_bias = None
        if ctx.is_variable_C:
            if not A.is_complex():
                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
            dx_dbl[:, -d_state:] = dC  # (bl d)
            dC = None
        ddelta = rearrange(ddelta, "b d l -> d (b l)")
        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
        # Accumulate the x_proj contribution into dconv1d_out in place.
        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
        )
        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
        # One gradient per forward input; None for the non-tensor flags.
        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
                dA, dB, dC, dD,
                ddelta_bias if delta_bias is not None else None,
                dB_proj_bias, dC_proj_bias, None)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class MambaInnerFn(torch.autograd.Function):
    """Fused Mamba inner block: causal conv1d -> input-dependent SSM scan ->
    output projection.

    Same as :class:`MambaInnerFnNoOutProj` but applies ``out_proj_weight`` /
    ``out_proj_bias`` at the end, returning (batch, seqlen, dim).
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                out_proj_weight, out_proj_bias,
                A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
        """
        xz: (batch, dim, seqlen)
        """
        # checkpoint_lvl == 1 drops conv1d_out/delta here and recomputes them
        # in backward to save activation memory.
        assert checkpoint_lvl in [0, 1]
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        # Complex A stores d_state as interleaved real/imag pairs.
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        if torch.is_autocast_enabled():
            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype())
                             if out_proj_bias is not None else None)
        if xz.stride(-1) != 1:
            xz = xz.contiguous()
        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
        x, z = xz.chunk(2, dim=1)
        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
        # We're being very careful here about the layout, to avoid extra transposes.
        # We want delta to have d as the slowest moving dimension
        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
        # B/C being None means they are input-dependent and sliced out of x_dbl.
        ctx.is_variable_B = B is None
        ctx.is_variable_C = C is None
        ctx.B_proj_bias_is_None = B_proj_bias is None
        ctx.C_proj_bias_is_None = C_proj_bias is None
        if B is None:  # variable B
            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
            if B_proj_bias is not None:
                B = B + B_proj_bias.to(dtype=B.dtype)
            if not A.is_complex():
                # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if B.stride(-1) != 1:
                B = B.contiguous()
        if C is None:  # variable C
            C = x_dbl[:, -d_state:]  # (bl dstate)
            if C_proj_bias is not None:
                C = C + C_proj_bias.to(dtype=C.dtype)
            if not A.is_complex():
                # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if C.stride(-1) != 1:
                C = C.contiguous()
        if D is not None:
            D = D.contiguous()
        out, scan_intermediates, out_z = selective_scan_cuda.fwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
        )
        ctx.delta_softplus = delta_softplus
        ctx.out_proj_bias_is_None = out_proj_bias is None
        ctx.checkpoint_lvl = checkpoint_lvl
        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
            conv1d_out, delta = None, None
        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
                              delta_proj_weight, out_proj_weight, conv1d_out, delta,
                              A, B, C, D, delta_bias, scan_intermediates, out)
        # Final projection returns the conventional (batch, seqlen, dim) layout.
        return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)

    @staticmethod
    @custom_bwd
    def backward(ctx, dout):
        # dout: (batch, seqlen, dim)
        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight,
         conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        x, z = xz.chunk(2, dim=1)
        if dout.stride(-1) != 1:
            dout = dout.contiguous()
        if ctx.checkpoint_lvl == 1:
            # Recompute the activations dropped in forward.
            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
                              "d (b l) -> b d l", l = L)
        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
        # backward of selective_scan_cuda with the backward of chunk).
        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
        dx, dz = dxz.chunk(2, dim=1)
        # Back out the output projection: dout -> gradient w.r.t. out_z in "b d l".
        dout = rearrange(dout, "b l e -> e (b l)")
        dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L)
        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz,
            ctx.delta_softplus,
            True  # option to recompute out_z
        )
        dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)"))
        dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None
        dD = dD if D is not None else None
        dx_dbl = torch.empty_like(x_dbl)
        dB_proj_bias = None
        if ctx.is_variable_B:
            # Fold dB back into the x_proj gradient slice it came from.
            if not A.is_complex():
                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
            dB = None
        dC_proj_bias = None
        if ctx.is_variable_C:
            if not A.is_complex():
                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
            dx_dbl[:, -d_state:] = dC  # (bl d)
            dC = None
        ddelta = rearrange(ddelta, "b d l -> d (b l)")
        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
        # Accumulate the x_proj contribution into dconv1d_out in place.
        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
        )
        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
        # One gradient per forward input; None for the non-tensor flags.
        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
                dout_proj_weight, dout_proj_bias,
                dA, dB, dC, dD,
                ddelta_bias if delta_bias is not None else None,
                dB_proj_bias, dC_proj_bias, None)
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
class BiMambaInnerFn(torch.autograd.Function):
    """
    Fused bidirectional Mamba inner block as a custom autograd Function.

    Runs the causal conv1d + input-dependent (delta, B, C) projections, then
    TWO selective scans: one forward in time (with state matrix ``A``) and one
    on the time-reversed sequence (with ``A_b``), summing the two gated outputs
    before the final output projection.  Gradients for both scan directions are
    computed in :meth:`backward` and accumulated.
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                out_proj_weight, out_proj_bias,
                A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
        """
        xz: (batch, dim, seqlen) — concatenation of the scan input ``x`` and the
            gate ``z`` along the channel dimension (chunked in half below).
        checkpoint_lvl: 1 drops conv1d_out/delta after forward and recomputes
            them in backward (saves memory); 0 saves them.
        Returns: (batch, seqlen, out_dim) after the output projection.
        """
        assert checkpoint_lvl in [0, 1]
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        # Complex A stores (real, imag) interleaved, hence the factor of 2.
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        if torch.is_autocast_enabled():
            # Mirror autocast behavior for the manually applied projection weights.
            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype())
                             if out_proj_bias is not None else None)
        if xz.stride(-1) != 1:
            xz = xz.contiguous()
        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
        x, z = xz.chunk(2, dim=1)
        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
        # We're being very careful here about the layout, to avoid extra transposes.
        # We want delta to have d as the slowest moving dimension
        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
        # Remember which inputs were derived from x_dbl so backward can route
        # their gradients back through the x_proj matmul.
        ctx.is_variable_B = B is None
        ctx.is_variable_C = C is None
        ctx.B_proj_bias_is_None = B_proj_bias is None
        ctx.C_proj_bias_is_None = C_proj_bias is None
        if B is None:  # variable B: slice it out of the shared x_dbl projection
            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
            if B_proj_bias is not None:
                B = B + B_proj_bias.to(dtype=B.dtype)
            if not A.is_complex():
                # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if B.stride(-1) != 1:
                B = B.contiguous()
        if C is None:  # variable C: slice it out of the shared x_dbl projection
            C = x_dbl[:, -d_state:]  # (bl dstate)
            if C_proj_bias is not None:
                C = C + C_proj_bias.to(dtype=C.dtype)
            if not A.is_complex():
                # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if C.stride(-1) != 1:
                C = C.contiguous()
        if D is not None:
            D = D.contiguous()
        # Forward-direction scan.
        out_f, scan_intermediates_f, out_z_f = selective_scan_cuda.fwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
        )
        assert not A_b.is_complex(), "A should not be complex!!"
        # Reverse-direction scan: every sequence-indexed tensor is flipped along
        # the last (time) axis, scanned with A_b, and the result flipped back.
        out_b, scan_intermediates_b, out_z_b = selective_scan_cuda.fwd(
            conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus,
        )

        out_z = out_z_f + out_z_b.flip([-1])

        ctx.delta_softplus = delta_softplus
        ctx.out_proj_bias_is_None = out_proj_bias is None
        ctx.checkpoint_lvl = checkpoint_lvl
        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
            conv1d_out, delta = None, None
        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
                              delta_proj_weight, out_proj_weight, conv1d_out, delta,
                              A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b)
        return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)

    @staticmethod
    @custom_bwd
    def backward(ctx, dout):
        # dout: (batch, seqlen, dim)
        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight,
         conv1d_out, delta, A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b) = ctx.saved_tensors
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        x, z = xz.chunk(2, dim=1)
        if dout.stride(-1) != 1:
            dout = dout.contiguous()
        if ctx.checkpoint_lvl == 1:
            # Recompute the activations that were dropped in forward to save memory.
            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
                              "d (b l) -> b d l", l = L)
        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
        # backward of selective_scan_cuda with the backward of chunk).
        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
        dx, dz = dxz.chunk(2, dim=1)
        dout = rearrange(dout, "b l e -> e (b l)")
        dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L)
        # Backward through the forward-direction scan.
        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z_f = selective_scan_cuda.bwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates_f, out_f, dz,
            ctx.delta_softplus,
            True  # option to recompute out_z
        )
        # flip one — backward through the reverse-direction scan on flipped inputs.
        dz_b = torch.empty_like(dz)
        dconv1d_out_f_b, ddelta_f_b, dA_b, dB_f_b, dC_f_b, dD_b, ddelta_bias_b, dz_b, out_z_b = selective_scan_cuda.bwd(
            conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, dout_y.flip([-1]), scan_intermediates_b, out_b, dz_b,
            ctx.delta_softplus,
            True  # option to recompute out_z
        )

        # Accumulate gradients from both directions (reverse-scan grads are
        # un-flipped back to original time order first).
        dconv1d_out = dconv1d_out + dconv1d_out_f_b.flip([-1])
        ddelta = ddelta + ddelta_f_b.flip([-1])
        dB = dB + dB_f_b.flip([-1])
        dC = dC + dC_f_b.flip([-1])
        dD = dD + dD_b
        ddelta_bias = ddelta_bias + ddelta_bias_b
        dz = dz + dz_b.flip([-1])
        out_z = out_z_f + out_z_b.flip([-1])

        dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)"))
        dout_proj_bias = dout.sum(dim=(0, 1)) if not ctx.out_proj_bias_is_None else None
        dD = dD if D is not None else None
        dx_dbl = torch.empty_like(x_dbl)
        dB_proj_bias = None
        if ctx.is_variable_B:
            # Route dB back into the x_dbl slice it was taken from in forward.
            if not A.is_complex():
                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
            dB = None
        dC_proj_bias = None
        if ctx.is_variable_C:
            # Route dC back into the x_dbl slice it was taken from in forward.
            if not A.is_complex():
                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
            dx_dbl[:, -d_state:] = dC  # (bl d)
            dC = None
        ddelta = rearrange(ddelta, "b d l -> d (b l)")
        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
        )
        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
        # Gradients are returned in the same order as forward's inputs.
        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
                dout_proj_weight, dout_proj_bias,
                dA, dA_b, dB, dC, dD,
                ddelta_bias if delta_bias is not None else None,
                dB_proj_bias, dC_proj_bias, None)
|
| 604 |
+
|
| 605 |
+
|
| 606 |
+
def mamba_inner_fn(
    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
    out_proj_weight, out_proj_bias,
    A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
    C_proj_bias=None, delta_softplus=True
):
    """
    Apply the fused unidirectional Mamba inner block via the custom autograd
    Function ``MambaInnerFn`` (conv1d + projections + selective scan + out proj).

    ``B``/``C`` left as None means they are computed from the input
    ("variable" / input-dependent) inside the fused kernel.
    """
    return MambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                              out_proj_weight, out_proj_bias,
                              A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus)
|
| 615 |
+
|
| 616 |
+
def bimamba_inner_fn(
    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
    out_proj_weight, out_proj_bias,
    A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
    C_proj_bias=None, delta_softplus=True
):
    """
    Apply the fused bidirectional Mamba inner block via ``BiMambaInnerFn``.

    ``A`` parameterizes the forward-in-time scan and ``A_b`` the scan over the
    time-reversed sequence; the two gated scan outputs are summed before the
    output projection.
    """
    return BiMambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                                out_proj_weight, out_proj_bias,
                                A, A_b, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus)
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def mamba_inner_fn_no_out_proj(
    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
    A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
    C_proj_bias=None, delta_softplus=True
):
    """
    Same as ``mamba_inner_fn`` but delegating to ``MambaInnerFnNoOutProj``,
    i.e. the fused inner block WITHOUT the final output projection — the caller
    applies its own projection to the returned scan output.
    """
    return MambaInnerFnNoOutProj.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                                       A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus)
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
def mamba_inner_ref(
    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
    out_proj_weight, out_proj_bias,
    A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
    C_proj_bias=None, delta_softplus=True
):
    """
    Reference (non-fused) implementation of the Mamba inner block, used to
    check the fused ``MambaInnerFn`` path.

    Args:
        xz: (batch, 2*dim, seqlen); chunked into scan input ``x`` and gate ``z``.
        A: state matrix; complex A stores (real, imag) interleaved.
        B, C: if None, computed from the input ("variable") via ``x_proj_weight``.
        delta_softplus: whether the scan applies softplus to delta.

    Returns:
        (batch, seqlen, out_dim) tensor after the output projection.
    """
    L = xz.shape[-1]
    delta_rank = delta_proj_weight.shape[1]
    d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
    x, z = xz.chunk(2, dim=1)
    x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu")
    # We're being very careful here about the layout, to avoid extra transposes.
    # We want delta to have d as the slowest moving dimension
    # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
    x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
    delta = delta_proj_weight @ x_dbl[:, :delta_rank].t()
    delta = rearrange(delta, "d (b l) -> b d l", l=L)
    if B is None:  # variable B
        B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl d)
        if B_proj_bias is not None:
            B = B + B_proj_bias.to(dtype=B.dtype)
        if not A.is_complex():
            B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
        else:
            B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
    if C is None:  # variable C
        C = x_dbl[:, -d_state:]  # (bl d)
        if C_proj_bias is not None:
            C = C + C_proj_bias.to(dtype=C.dtype)
        if not A.is_complex():
            C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
        else:
            C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
    # Fix: forward the delta_softplus argument instead of hard-coding True, so
    # callers passing delta_softplus=False get the behavior they asked for
    # (default is True, so existing callers are unaffected).
    y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=delta_softplus)
    return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias)
|
| 671 |
+
|
| 672 |
+
|
| 673 |
+
def bimamba_inner_ref(
    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
    out_proj_weight, out_proj_bias,
    A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
    C_proj_bias=None, delta_softplus=True
):
    """
    Reference (non-fused) implementation of the bidirectional Mamba inner
    block, used to check the fused ``BiMambaInnerFn`` path.

    ``A`` drives the forward-in-time scan; ``A_b`` drives a second scan over
    the time-reversed sequence whose output is flipped back and added.

    Args:
        xz: (batch, 2*dim, seqlen); chunked into scan input ``x`` and gate ``z``.
        B, C: if None, computed from the input ("variable") via ``x_proj_weight``.
        delta_softplus: whether the scans apply softplus to delta.

    Returns:
        (batch, seqlen, out_dim) tensor after the output projection.
    """
    L = xz.shape[-1]
    delta_rank = delta_proj_weight.shape[1]
    d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
    x, z = xz.chunk(2, dim=1)
    x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu")
    # We're being very careful here about the layout, to avoid extra transposes.
    # We want delta to have d as the slowest moving dimension
    # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
    x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
    delta = delta_proj_weight @ x_dbl[:, :delta_rank].t()
    delta = rearrange(delta, "d (b l) -> b d l", l=L)
    if B is None:  # variable B
        B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl d)
        if B_proj_bias is not None:
            B = B + B_proj_bias.to(dtype=B.dtype)
        if not A.is_complex():
            B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
        else:
            B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
    if C is None:  # variable C
        C = x_dbl[:, -d_state:]  # (bl d)
        if C_proj_bias is not None:
            C = C + C_proj_bias.to(dtype=C.dtype)
        if not A.is_complex():
            C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
        else:
            C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
    # Fix: forward delta_softplus in both scan calls instead of hard-coding
    # True (default is True, so existing callers are unaffected).
    y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=delta_softplus)
    y_b = selective_scan_fn(x.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus=delta_softplus)
    y = y + y_b.flip([-1])
    return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias)
|
source_code/SegMamba/monai/handlers/clearml_handlers.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from typing import TYPE_CHECKING, Any, Mapping, Sequence
|
| 15 |
+
|
| 16 |
+
from monai.utils import optional_import
|
| 17 |
+
|
| 18 |
+
from .tensorboard_handlers import TensorBoardImageHandler, TensorBoardStatsHandler
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ClearMLHandler:
    """
    Base class for the handlers that mirror their logging into ClearML.

    A single ClearML ``Task`` is attached to (or created for) the current
    process and stored on ``self.clearml_task``.
    For more details of ClearML usage, please refer to:
    https://clear.ml/docs/latest/docs/references/sdk/task

    Usage example is available in the tutorial:
    https://github.com/Project-MONAI/tutorials/blob/master/3d_segmentation/unet_segmentation_3d_ignite.ipynb.
    """

    def __init__(
        self,
        project_name: str | None,
        task_name: str | None,
        output_uri: str | bool,
        tags: Sequence[str] | None,
        reuse_last_task_id: bool,
        continue_last_task: bool,
        auto_connect_frameworks: bool | Mapping[str, bool | str | list],
        auto_connect_arg_parser: bool | Mapping[str, bool],
    ) -> None:
        """
        Args:
            project_name: ClearML project name, default to 'MONAI'.
            task_name: ClearML task name, default to 'monai_experiment'.
            output_uri: The default location for output models and other artifacts, default to 'True'.
            tags: Add a list of tags (str) to the created Task, default to 'None'.
            reuse_last_task_id: Force a new Task (experiment) with a previously used Task ID, default to 'True'.
            continue_last_task: Continue the execution of a previously executed Task (experiment), default to 'False'.
            auto_connect_frameworks: Automatically connect frameworks, default to 'True'.
            auto_connect_arg_parser: Automatically connect an argparse object to the Task, default to 'True'.

        """

        if TYPE_CHECKING:
            import clearml
        else:
            clearml, _ = optional_import("clearml")

        # Reuse a task the user may already have created with ``Task.init``;
        # only start a fresh one when no task is currently active.
        active_task = clearml.Task.current_task()
        if active_task:
            self.clearml_task = active_task
        else:
            task_options = {
                "project_name": project_name,
                "task_name": task_name,
                "output_uri": output_uri,
                "tags": tags,
                "reuse_last_task_id": reuse_last_task_id,
                "continue_last_task": continue_last_task,
                "auto_connect_frameworks": auto_connect_frameworks,
                "auto_connect_arg_parser": auto_connect_arg_parser,
            }
            self.clearml_task = clearml.Task.init(**task_options)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class ClearMLStatsHandler(ClearMLHandler, TensorBoardStatsHandler):
    """
    TensorBoard stats writer with ClearML mirroring.

    Inherits the stats-logging behavior of ``TensorBoardStatsHandler``; the
    ``ClearMLHandler`` base ensures a ClearML task is active, so everything
    written to TensorBoard is captured by ClearML automatically.

    Usage example is available in the tutorial:
    https://github.com/Project-MONAI/tutorials/blob/master/3d_segmentation/unet_segmentation_3d_ignite.ipynb.
    """

    def __init__(
        self,
        project_name: str | None = "MONAI",
        task_name: str | None = "monai_experiment",
        output_uri: str | bool = True,
        tags: Sequence[str] | None = None,
        reuse_last_task_id: bool = True,
        continue_last_task: bool = False,
        auto_connect_frameworks: bool | Mapping[str, bool | str | list] = True,
        auto_connect_arg_parser: bool | Mapping[str, bool] = True,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """
        Args:
            project_name: ClearML project name, default to 'MONAI'.
            task_name: ClearML task name, default to 'monai_experiment'.
            output_uri: The default location for output models and other artifacts, default to 'True'.
            tags: Add a list of tags (str) to the created Task, default to 'None'.
            reuse_last_task_id: Force a new Task (experiment) with a previously used Task ID, default to 'True'.
            continue_last_task: Continue the execution of a previously executed Task (experiment), default to 'False'.
            auto_connect_frameworks: Automatically connect frameworks, default to 'True'.
            auto_connect_arg_parser: Automatically connect an argparse object to the Task, default to 'True'.

        """

        # Initialise both bases explicitly: ClearML task setup first, then the
        # TensorBoard stats machinery with any remaining arguments.
        clearml_options = {
            "project_name": project_name,
            "task_name": task_name,
            "output_uri": output_uri,
            "tags": tags,
            "reuse_last_task_id": reuse_last_task_id,
            "continue_last_task": continue_last_task,
            "auto_connect_frameworks": auto_connect_frameworks,
            "auto_connect_arg_parser": auto_connect_arg_parser,
        }
        ClearMLHandler.__init__(self, **clearml_options)
        TensorBoardStatsHandler.__init__(self, *args, **kwargs)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class ClearMLImageHandler(ClearMLHandler, TensorBoardImageHandler):
    """
    TensorBoard image writer with ClearML mirroring.

    Inherits all functionality from ``TensorBoardImageHandler``; the
    ``ClearMLHandler`` base ensures a ClearML task is active, so everything
    written to TensorBoard is captured by ClearML automatically.

    Usage example is available in the tutorial:
    https://github.com/Project-MONAI/tutorials/blob/master/3d_segmentation/unet_segmentation_3d_ignite.ipynb.
    """

    def __init__(
        self,
        project_name: str | None = "MONAI",
        task_name: str | None = "monai_experiment",
        output_uri: str | bool = True,
        tags: Sequence[str] | None = None,
        reuse_last_task_id: bool = True,
        continue_last_task: bool = False,
        auto_connect_frameworks: bool | Mapping[str, bool | str | list] = True,
        auto_connect_arg_parser: bool | Mapping[str, bool] = True,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """
        Args:
            project_name: ClearML project name, default to 'MONAI'.
            task_name: ClearML task name, default to 'monai_experiment'.
            output_uri: The default location for output models and other artifacts, default to 'True'.
            tags: Add a list of tags (str) to the created Task, default to 'None'.
            reuse_last_task_id: Force a new Task (experiment) with a previously used Task ID, default to 'True'.
            continue_last_task: Continue the execution of a previously executed Task (experiment), default to 'False'.
            auto_connect_frameworks: Automatically connect frameworks, default to 'True'.
            auto_connect_arg_parser: Automatically connect an argparse object to the Task, default to 'True'.

        """

        # Initialise both bases explicitly: ClearML task setup first, then the
        # TensorBoard image machinery with any remaining arguments.
        clearml_options = {
            "project_name": project_name,
            "task_name": task_name,
            "output_uri": output_uri,
            "tags": tags,
            "reuse_last_task_id": reuse_last_task_id,
            "continue_last_task": continue_last_task,
            "auto_connect_frameworks": auto_connect_frameworks,
            "auto_connect_arg_parser": auto_connect_arg_parser,
        }
        ClearMLHandler.__init__(self, **clearml_options)
        TensorBoardImageHandler.__init__(self, *args, **kwargs)
|
source_code/SegMamba/monai/handlers/metric_logger.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections import defaultdict
|
| 15 |
+
from collections.abc import Callable, Mapping, Sequence
|
| 16 |
+
from enum import Enum
|
| 17 |
+
from threading import RLock
|
| 18 |
+
from typing import TYPE_CHECKING, Any
|
| 19 |
+
|
| 20 |
+
from monai.config import IgniteInfo
|
| 21 |
+
from monai.utils import min_version, optional_import
|
| 22 |
+
from monai.utils.enums import CommonKeys
|
| 23 |
+
|
| 24 |
+
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
|
| 25 |
+
if TYPE_CHECKING:
|
| 26 |
+
from ignite.engine import Engine
|
| 27 |
+
else:
|
| 28 |
+
Engine, _ = optional_import(
|
| 29 |
+
"ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator"
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _get_loss_from_output(output: Sequence[Mapping[str, Any]], loss_key: str = CommonKeys.LOSS) -> Any:
    """Pull the loss entry out of the first element of an engine output sequence."""
    first_item = output[0]
    return first_item[loss_key]
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class MetricLoggerKeys(Enum):
    """Keys used by ``MetricLogger.state_dict``/``load_state_dict`` to store collected values."""

    METRICS = "Metrics"
    LOSS = "Loss"
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class MetricLogger:
    """
    Collect per-iteration metrics and loss value from the attached trainer. This will also collect metric values from
    a given evaluator object which is expected to perform evaluation at the end of training epochs. This class is
    useful for collecting loss and metric values in one place for storage with checkpoint savers (`state_dict` and
    `load_state_dict` methods provided as expected by Pytorch and Ignite) and for graphing during training.

    Example::
        # construct an evaluator saving mean dice metric values in the key "val_mean_dice"
        evaluator = SupervisedEvaluator(..., key_val_metric={"val_mean_dice": MeanDice(...)})

        # construct the logger and associate with evaluator to extract metric values from
        logger = MetricLogger(evaluator=evaluator)

        # construct the trainer with the logger passed in as a handler so that it logs loss values
        trainer = SupervisedTrainer(..., train_handlers=[logger, ValidationHandler(1, evaluator)])

        # run training, logger.loss will be a list of (iteration, loss) values, logger.metrics a dict with key
        # "val_mean_dice" storing a list of (iteration, metric) values
        trainer.run()

    Args:
        loss_transform: Converts the `output` value from the trainer's state into a loss value
            `engine.state` and `loss_transform` inherit from the ignite concept:
            https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
            https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
        metric_transform: Converts the metric value coming from the trainer/evaluator's state into a storable value
        evaluator: Optional evaluator to consume metric results from at the end of its evaluation run
    """

    def __init__(
        self,
        loss_transform: Callable = _get_loss_from_output,
        metric_transform: Callable = lambda x: x,
        evaluator: Engine | None = None,
    ) -> None:
        self.loss_transform = loss_transform
        self.metric_transform = metric_transform
        # (iteration, loss) pairs collected from the trainer
        self.loss: list = []
        # metric name -> list of (iteration, value) pairs
        self.metrics: defaultdict = defaultdict(list)
        self.iteration = 0
        # RLock so __call__ may invoke log_metrics while holding the lock
        self.lock = RLock()

        if evaluator is not None:
            self.attach_evaluator(evaluator)

    def attach(self, engine: Engine) -> None:
        """
        Register this logger so it records the loss after every iteration.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        engine.add_event_handler(Events.ITERATION_COMPLETED, self)

    def attach_evaluator(self, evaluator: Engine) -> None:
        """
        Attach event handlers to the given evaluator to log metric values from it.

        Args:
            evaluator: Ignite Engine implementing network evaluation
        """
        evaluator.add_event_handler(Events.COMPLETED, self.log_metrics)

    def __call__(self, engine: Engine) -> None:
        """
        Record the current iteration's loss value and any metrics `engine` holds.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        with self.lock:
            self.iteration = engine.state.iteration
            loss_value = self.loss_transform(engine.state.output)
            self.loss.append((self.iteration, loss_value))
            self.log_metrics(engine)

    def log_metrics(self, engine: Engine) -> None:
        """
        Log metrics from the given Engine's state member.

        Args:
            engine: Ignite Engine to log from
        """
        with self.lock:
            for name, raw_value in engine.state.metrics.items():
                self.metrics[name].append((self.iteration, self.metric_transform(raw_value)))

    def state_dict(self):
        """Return collected loss and metric histories for checkpointing."""
        return {MetricLoggerKeys.LOSS: self.loss, MetricLoggerKeys.METRICS: self.metrics}

    def load_state_dict(self, state_dict):
        """Restore loss and metric histories from a checkpoint, replacing current contents in place."""
        self.loss[:] = state_dict[MetricLoggerKeys.LOSS]
        self.metrics.clear()
        self.metrics.update(state_dict[MetricLoggerKeys.METRICS])
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
# Lowercase alias of MetricLogger — presumably kept for backward-compatible imports; verify before removing.
metriclogger = MetricLogger
|
source_code/SegMamba/monai/handlers/metrics_reloaded_handler.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Callable
|
| 15 |
+
|
| 16 |
+
from monai.handlers.ignite_metric import IgniteMetricHandler
|
| 17 |
+
from monai.metrics import MetricsReloadedBinary, MetricsReloadedCategorical
|
| 18 |
+
from monai.utils.enums import MetricReduction
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class MetricsReloadedBinaryHandler(IgniteMetricHandler):
    """
    Handler of MetricsReloadedBinary, which wraps the binary pairwise metrics of MetricsReloaded.
    """

    def __init__(
        self,
        metric_name: str,
        include_background: bool = True,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        get_not_nans: bool = False,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """
        Args:
            metric_name: Name of a binary metric from the MetricsReloaded package.
            include_background: whether the first (background) channel of the predicted
                output takes part in the computation. Defaults to ``True``.
            reduction: reduction mode applied over the `not-nan` values; one of
                {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default ``"mean"``.
                ``"none"`` skips the reduction entirely.
            get_not_nans: if True, ``aggregate()`` returns ``(metric, not_nans)`` where
                `not_nans` counts the non-nan entries and matches the metric's shape.
            output_transform: callable extracting a ``(y_pred, y)`` pair (batch-first
                Tensors or lists of channel-first Tensors) from
                ``ignite.engine.state.output`` as required by ``update()``. See
                https://pytorch.org/ignite/concepts.html#state and the tutorial:
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: whether to keep per-image computation details (e.g. TP/TN/FP/FN)
                in the ``engine.state.metric_details`` dict under the metric name.
                Defaults to True.

        See also:
            :py:meth:`monai.metrics.wrapper`
        """
        # build the underlying MetricsReloaded wrapper, then hand it to the Ignite adapter
        wrapped = MetricsReloadedBinary(
            metric_name=metric_name,
            include_background=include_background,
            reduction=reduction,
            get_not_nans=get_not_nans,
        )
        super().__init__(metric_fn=wrapped, output_transform=output_transform, save_details=save_details)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class MetricsReloadedCategoricalHandler(IgniteMetricHandler):
    """
    Handler of MetricsReloadedCategorical, which wraps the categorical pairwise metrics of MetricsReloaded.
    """

    def __init__(
        self,
        metric_name: str,
        include_background: bool = True,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        get_not_nans: bool = False,
        smooth_dr: float = 1e-5,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """
        Args:
            metric_name: Name of a categorical metric from the MetricsReloaded package.
            include_background: whether the first (background) channel of the predicted
                output takes part in the computation. Defaults to ``True``.
            reduction: reduction mode applied over the `not-nan` values; one of
                {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default ``"mean"``.
                ``"none"`` skips the reduction entirely.
            get_not_nans: if True, ``aggregate()`` returns ``(metric, not_nans)`` where
                `not_nans` counts the non-nan entries and matches the metric's shape.
            smooth_dr: a small constant added to the denominator to avoid nan.
                OBS: should be greater than zero.
            output_transform: callable extracting a ``(y_pred, y)`` pair (batch-first
                Tensors or lists of channel-first Tensors) from
                ``ignite.engine.state.output`` as required by ``update()``. See
                https://pytorch.org/ignite/concepts.html#state and the tutorial:
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: whether to keep per-image computation details (e.g. TP/TN/FP/FN)
                in the ``engine.state.metric_details`` dict under the metric name.
                Defaults to True.

        See also:
            :py:meth:`monai.metrics.wrapper`
        """
        # build the underlying MetricsReloaded wrapper, then hand it to the Ignite adapter
        wrapped = MetricsReloadedCategorical(
            metric_name=metric_name,
            include_background=include_background,
            reduction=reduction,
            get_not_nans=get_not_nans,
            smooth_dr=smooth_dr,
        )
        super().__init__(metric_fn=wrapped, output_transform=output_transform, save_details=save_details)
|
source_code/SegMamba/monai/handlers/nvtx_handlers.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
"""
|
| 12 |
+
Wrapper around NVIDIA Tools Extension for profiling MONAI ignite workflow
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
from typing import TYPE_CHECKING
|
| 18 |
+
|
| 19 |
+
from monai.config import IgniteInfo
|
| 20 |
+
from monai.utils import ensure_tuple, min_version, optional_import
|
| 21 |
+
|
| 22 |
+
_nvtx, _ = optional_import("torch._C._nvtx", descriptor="NVTX is not installed. Are you sure you have a CUDA build?")
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
from ignite.engine import Engine, Events
|
| 25 |
+
else:
|
| 26 |
+
Engine, _ = optional_import(
|
| 27 |
+
"ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator"
|
| 28 |
+
)
|
| 29 |
+
Events, _ = optional_import(
|
| 30 |
+
"ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events", as_type="decorator"
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
__all__ = ["RangeHandler", "RangePushHandler", "RangePopHandler", "MarkHandler"]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class RangeHandler:
    """
    Attach a NVTX range to a pair of Ignite events:
    the range is pushed when the first event fires and popped when the second fires.
    Stores zero-based depth of the range that is started.

    Args:
        events: a string, pair of Ignite events, pair of Ignite event literals, or pair of Ignite events and literals.
            A single string names a default STARTED/COMPLETED pair (e.g. "EPOCH" maps to
            Events.EPOCH_STARTED and Events.EPOCH_COMPLETED); accepted names are
            BATCH, ITERATION, EPOCH, and ENGINE.
            A pair may mix Events members and their string literals, for instance
            (Events.EPOCH_STARTED and "EPOCH_COMPLETED").
            For the complete list of Events,
            check https://pytorch.org/ignite/generated/ignite.engine.events.Events.html.

        msg: ASCII message to associate with range.
            If not provided, the name of first event will be assigned to the NVTX range.
    """

    def __init__(self, events: str | tuple[str | Events, str | Events], msg: str | None = None) -> None:
        self.events = self.resolve_events(events)
        if msg is None:
            # single string: use it as-is; otherwise join the resolved events' names
            msg = events if isinstance(events, str) else "/".join(e.name for e in self.events)
        self.msg = msg
        self.depth = None

    def resolve_events(self, events: str | tuple) -> tuple[Events, Events]:
        """
        Normalize the input into a pair of Ignite events.
        """
        events = ensure_tuple(events)
        if len(events) == 1:
            return self.create_paired_events(events[0])
        if len(events) == 2:
            first, second = events
            return self.get_event(first), self.get_event(second)
        raise ValueError(f"Exactly two Ignite events should be provided [received {len(events)}].")

    def create_paired_events(self, event: str) -> tuple[Events, Events]:
        """
        Expand an event prefix name into its STARTED/COMPLETED event pair.
        """
        prefixes = {"": "", "ENGINE": "", "EPOCH": "EPOCH_", "ITERATION": "ITERATION_", "BATCH": "GET_BATCH_"}
        prefix = prefixes[event.upper()]
        return self.get_event(prefix + "STARTED"), self.get_event(prefix + "COMPLETED")

    def get_event(self, event: str | Events) -> Events:
        # string literals are looked up by their upper-cased name; events pass through
        if isinstance(event, str):
            return Events[event.upper()]
        return event

    def attach(self, engine: Engine) -> None:
        """
        Attach an NVTX Range to specific Ignite events
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        engine.add_event_handler(self.events[0], self.range_push)
        engine.add_event_handler(self.events[1], self.range_pop)

    def range_push(self):
        # keep the depth NVTX reports so nested ranges can be inspected later
        self.depth = _nvtx.rangePushA(self.msg)

    def range_pop(self):
        _nvtx.rangePop()
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class RangePushHandler:
    """
    At a specific event, pushes a range onto a stack of nested range span.
    Stores zero-based depth of the range that is started.

    Args:
        event: Ignite event (or its name) that triggers the push.
        msg: ASCII message to associate with range
    """

    def __init__(self, event: str | Events, msg: str | None = None) -> None:
        if isinstance(event, str):
            event = Events[event.upper()]
        self.event = event
        # default the range message to the triggering event's name
        self.msg = self.event.name if msg is None else msg
        self.depth = None

    def attach(self, engine: Engine) -> None:
        """
        Push an NVTX range at a specific Ignite event
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        engine.add_event_handler(self.event, self.range_push)

    def range_push(self):
        self.depth = _nvtx.rangePushA(self.msg)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class RangePopHandler:
    """
    At a specific event, pop a previously pushed range.

    Args:
        event: Ignite event (or its name) at which to pop the range.
    """

    def __init__(self, event: str | Events) -> None:
        if isinstance(event, str):
            event = Events[event.upper()]
        self.event = event

    def attach(self, engine: Engine) -> None:
        """
        Pop an NVTX range at a specific Ignite event
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        engine.add_event_handler(self.event, self.range_pop)

    def range_pop(self):
        _nvtx.rangePop()
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class MarkHandler:
    """
    Mark an instantaneous event that occurred at some point.

    Args:
        event: Ignite event (or its name) at which to emit the mark.
        msg: ASCII message to associate with range
    """

    def __init__(self, event: str | Events, msg: str | None = None) -> None:
        if isinstance(event, str):
            event = Events[event.upper()]
        self.event = event
        # default the mark message to the triggering event's name
        self.msg = self.event.name if msg is None else msg

    def attach(self, engine: Engine) -> None:
        """
        Add an NVTX mark to a specific Ignite event
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        engine.add_event_handler(self.event, self.mark)

    def mark(self):
        _nvtx.markA(self.msg)
|
source_code/SegMamba/monai/handlers/panoptic_quality.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Callable
|
| 15 |
+
|
| 16 |
+
from monai.handlers.ignite_metric import IgniteMetricHandler
|
| 17 |
+
from monai.metrics import PanopticQualityMetric
|
| 18 |
+
from monai.utils import MetricReduction
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class PanopticQuality(IgniteMetricHandler):
    """
    Computes Panoptic quality from full size Tensor and collects average over batch, class-channels, iterations.
    """

    def __init__(
        self,
        num_classes: int,
        metric_name: str = "pq",
        reduction: MetricReduction | str = MetricReduction.MEAN_BATCH,
        match_iou_threshold: float = 0.5,
        smooth_numerator: float = 1e-6,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """
        Args:
            num_classes: number of classes, not counting the background.
            metric_name: output metric; one of "pq", "sq" or "rq".
            reduction: reduction mode applied over the `not-nan` values; one of
                {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default ``"mean_batch"``.
                ``"none"`` skips the reduction entirely.
            match_iou_threshold: IOU threshold used to pair instances of `y_pred` and `y`.
                With a value >= 0.5 the pairing is unique by construction; below 0.5 the
                Munkres assignment is used to maximise the number of unique pairs.
            smooth_numerator: a small constant added to the numerator to avoid zero.
            output_transform: callable extracting a ``(y_pred, y)`` pair (batch-first
                Tensors or lists of channel-first Tensors) from
                ``ignite.engine.state.output`` as required by ``update()``. See
                https://pytorch.org/ignite/concepts.html#state and the tutorial:
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: whether to keep per-image computation details (e.g. panoptic
                quality of every image) in ``engine.state.metric_details`` under the
                metric name. Defaults to True.

        See also:
            :py:meth:`monai.metrics.panoptic_quality.compute_panoptic_quality`
        """
        super().__init__(
            metric_fn=PanopticQualityMetric(
                num_classes=num_classes,
                metric_name=metric_name,
                reduction=reduction,
                match_iou_threshold=match_iou_threshold,
                smooth_numerator=smooth_numerator,
            ),
            output_transform=output_transform,
            save_details=save_details,
        )
|
source_code/SegMamba/monai/handlers/postprocessing.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Callable
|
| 15 |
+
from typing import TYPE_CHECKING
|
| 16 |
+
|
| 17 |
+
from monai.config import IgniteInfo
|
| 18 |
+
from monai.engines.utils import IterationEvents, engine_apply_transform
|
| 19 |
+
from monai.utils import min_version, optional_import
|
| 20 |
+
|
| 21 |
+
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
|
| 22 |
+
if TYPE_CHECKING:
|
| 23 |
+
from ignite.engine import Engine
|
| 24 |
+
else:
|
| 25 |
+
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class PostProcessing:
    """
    Ignite handler to execute additional post processing after the post processing in engines.
    So users can insert other handlers between engine postprocessing and this post processing handler.
    If using components from `monai.transforms` as the `transform`, recommend to decollate `engine.state.batch`
    and `engine.state.batch` in the engine(set `decollate=True`) or in the `DecollateBatch` handler first.

    """

    def __init__(self, transform: Callable, event: str = "MODEL_COMPLETED") -> None:
        """
        Args:
            transform: callable (possibly composed transforms) executed on
                `engine.state.batch` and `engine.state.output`.
            event: EVENT to attach the handler to, either "MODEL_COMPLETED" (default)
                or "ITERATION_COMPLETED".

        """
        normalized = event.upper()
        if normalized not in ("MODEL_COMPLETED", "ITERATION_COMPLETED"):
            raise ValueError("event should be `MODEL_COMPLETED` or `ITERATION_COMPLETED`.")
        self.transform = transform
        self.event = normalized

    def attach(self, engine: Engine) -> None:
        """
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        target = IterationEvents.MODEL_COMPLETED if self.event == "MODEL_COMPLETED" else Events.ITERATION_COMPLETED
        engine.add_event_handler(target, self)

    def __call__(self, engine: Engine) -> None:
        """
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        batch, output = engine.state.batch, engine.state.output
        if isinstance(batch, list) and isinstance(output, list):
            # decollated data: transform each per-item pair in place
            for i, (b, o) in enumerate(zip(batch, output)):
                engine.state.batch[i], engine.state.output[i] = engine_apply_transform(b, o, self.transform)
        else:
            engine.state.batch, engine.state.output = engine_apply_transform(
                batch=batch, output=output, transform=self.transform
            )
|
source_code/SegMamba/monai/handlers/probability_maps.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import logging
|
| 15 |
+
import threading
|
| 16 |
+
from typing import TYPE_CHECKING
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
|
| 20 |
+
from monai.config import DtypeLike, IgniteInfo
|
| 21 |
+
from monai.data.folder_layout import FolderLayout
|
| 22 |
+
from monai.utils import ProbMapKeys, min_version, optional_import
|
| 23 |
+
from monai.utils.enums import CommonKeys
|
| 24 |
+
|
| 25 |
+
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
|
| 26 |
+
if TYPE_CHECKING:
|
| 27 |
+
from ignite.engine import Engine
|
| 28 |
+
else:
|
| 29 |
+
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class ProbMapProducer:
    """
    Event handler triggered on completing every iteration to calculate and save the probability map.
    This handler use metadata from MetaTensor to create the probability map. This can be simply achieved by using
    `monai.data.SlidingPatchWSIDataset` or `monai.data.MaskedPatchWSIDataset` as the dataset.

    """

    def __init__(
        self,
        output_dir: str = "./",
        output_postfix: str = "",
        prob_key: str = "pred",
        dtype: DtypeLike = np.float64,
        name: str | None = None,
    ) -> None:
        """
        Args:
            output_dir: output directory to save probability maps.
            output_postfix: a string appended to all output file names.
            prob_key: the key associated to the probability output of the model
            dtype: the data type in which the probability map is stored. Default np.float64.
            name: identifier of logging.logger to use, defaulting to `engine.logger`.

        """
        # decides the output ".npy" path for each image name
        self.folder_layout = FolderLayout(
            output_dir=output_dir,
            postfix=output_postfix,
            extension=".npy",
            parent=False,
            makedirs=True,
            data_root_dir="",
        )

        self.logger = logging.getLogger(name)
        self._name = name
        self.prob_key = prob_key
        self.dtype = dtype
        # per-image accumulation buffers; an entry is deleted once its map is saved
        self.prob_map: dict[str, np.ndarray] = {}
        # per-image countdown of patches still expected; reaching 0 triggers saving
        self.counter: dict[str, int] = {}
        self.num_done_images: int = 0
        self.num_images: int = 0
        # guards counter updates in __call__ (iterations may run concurrently)
        self.lock = threading.Lock()

    def attach(self, engine: Engine) -> None:
        """
        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """

        # assumes the engine's dataset exposes `image_data` with per-image
        # NAME/COUNT/SIZE metadata (as SlidingPatchWSIDataset does) -- TODO confirm
        image_data = engine.data_loader.dataset.image_data  # type: ignore
        self.num_images = len(image_data)

        # Initialized probability maps for all the images
        for sample in image_data:
            name = sample[ProbMapKeys.NAME]
            self.counter[name] = sample[ProbMapKeys.COUNT]
            self.prob_map[name] = np.zeros(sample[ProbMapKeys.SIZE], dtype=self.dtype)

        # fall back to the engine's logger when no explicit logger name was given
        if self._name is None:
            self.logger = engine.logger
        # avoid double registration when attach() is called more than once
        if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
            engine.add_event_handler(Events.ITERATION_COMPLETED, self)
        if not engine.has_event_handler(self.finalize, Events.COMPLETED):
            engine.add_event_handler(Events.COMPLETED, self.finalize)

    def __call__(self, engine: Engine) -> None:
        """
        This method assumes self.batch_transform will extract metadata from the input batch.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        if not isinstance(engine.state.batch, dict) or not isinstance(engine.state.output, dict):
            raise ValueError("engine.state.batch and engine.state.output must be dictionaries.")
        # per-patch image names and locations come from the MetaTensor metadata
        names = engine.state.batch[CommonKeys.IMAGE].meta[ProbMapKeys.NAME]
        locs = engine.state.batch[CommonKeys.IMAGE].meta[ProbMapKeys.LOCATION]
        probs = engine.state.output[self.prob_key]
        for name, loc, prob in zip(names, locs, probs):
            # write this patch's probability at its location in the image's map
            self.prob_map[name][tuple(loc)] = prob
            with self.lock:
                # last patch of an image triggers saving its completed map
                self.counter[name] -= 1
                if self.counter[name] == 0:
                    self.save_prob_map(name)

    def save_prob_map(self, name: str) -> None:
        """
        This method save the probability map for an image, when its inference is finished,
        and delete that probability map from memory.

        Args:
            name: the name of image to be saved.
        """
        file_path = self.folder_layout.filename(name)
        np.save(file_path, self.prob_map[name])

        self.num_done_images += 1
        self.logger.info(f"Inference of '{name}' is done [{self.num_done_images}/{self.num_images}]!")
        # free the buffers for this image; it will not be written to again
        del self.prob_map[name]
        del self.counter[name]

    def finalize(self, engine: Engine) -> None:
        # summary log emitted once the whole run completes
        self.logger.info(f"Probability map is created for {self.num_done_images}/{self.num_images} images!")
|
source_code/SegMamba/monai/handlers/regression_metrics.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Callable
|
| 15 |
+
|
| 16 |
+
from monai.handlers.ignite_metric import IgniteMetricHandler
|
| 17 |
+
from monai.metrics import MAEMetric, MSEMetric, PSNRMetric, RMSEMetric
|
| 18 |
+
from monai.utils import MetricReduction
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class MeanSquaredError(IgniteMetricHandler):
    """
    Ignite metric handler that accumulates Mean Squared Error from full size Tensors
    and reports the average over batches/iterations.  Thin wrapper around
    :py:class:`monai.metrics.MSEMetric`.
    """

    def __init__(
        self,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """
        Args:
            reduction: reduction mode applied over `not-nan` values; one of
                ``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``. Defaults to ``"mean"``;
                ``"none"`` skips reduction entirely.
            output_transform: callable extracting the ``(y_pred, y)`` pair from
                ``ignite.engine.state.output`` as required by ``update()``; `y_pred`
                and `y` may be batch-first Tensors or lists of channel-first Tensors.
                See https://pytorch.org/ignite/concepts.html#state and
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: if True (default), per-image metric details are saved to
                ``engine.state.metric_details`` under the metric name.

        See also:
            :py:class:`monai.metrics.MSEMetric`
        """
        super().__init__(
            metric_fn=MSEMetric(reduction=reduction),
            output_transform=output_transform,
            save_details=save_details,
        )
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class MeanAbsoluteError(IgniteMetricHandler):
    """
    Ignite metric handler that accumulates Mean Absolute Error from full size Tensors
    and reports the average over batches/iterations.  Thin wrapper around
    :py:class:`monai.metrics.MAEMetric`.
    """

    def __init__(
        self,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """
        Args:
            reduction: reduction mode applied over `not-nan` values; one of
                ``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``. Defaults to ``"mean"``;
                ``"none"`` skips reduction entirely.
            output_transform: callable extracting the ``(y_pred, y)`` pair from
                ``ignite.engine.state.output`` as required by ``update()``; `y_pred`
                and `y` may be batch-first Tensors or lists of channel-first Tensors.
                See https://pytorch.org/ignite/concepts.html#state and
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: if True (default), per-image metric details are saved to
                ``engine.state.metric_details`` under the metric name.

        See also:
            :py:class:`monai.metrics.MAEMetric`
        """
        super().__init__(
            metric_fn=MAEMetric(reduction=reduction),
            output_transform=output_transform,
            save_details=save_details,
        )
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class RootMeanSquaredError(IgniteMetricHandler):
    """
    Ignite metric handler that accumulates Root Mean Squared Error from full size
    Tensors and reports the average over batches/iterations.  Thin wrapper around
    :py:class:`monai.metrics.RMSEMetric`.
    """

    def __init__(
        self,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """
        Args:
            reduction: reduction mode applied over `not-nan` values; one of
                ``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``. Defaults to ``"mean"``;
                ``"none"`` skips reduction entirely.
            output_transform: callable extracting the ``(y_pred, y)`` pair from
                ``ignite.engine.state.output`` as required by ``update()``; `y_pred`
                and `y` may be batch-first Tensors or lists of channel-first Tensors.
                See https://pytorch.org/ignite/concepts.html#state and
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: if True (default), per-image metric details are saved to
                ``engine.state.metric_details`` under the metric name.

        See also:
            :py:class:`monai.metrics.RMSEMetric`
        """
        super().__init__(
            metric_fn=RMSEMetric(reduction=reduction),
            output_transform=output_transform,
            save_details=save_details,
        )
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class PeakSignalToNoiseRatio(IgniteMetricHandler):
    """
    Computes Peak Signal to Noise Ratio from full size Tensor and collects average over batch, iterations.
    """

    def __init__(
        self,
        max_val: int | float,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        output_transform: Callable = lambda x: x,
        save_details: bool = True,
    ) -> None:
        """

        Args:
            max_val: The dynamic range of the images/volumes (i.e., the difference between the
                maximum and the minimum allowed values e.g. 255 for a uint8 image).
            reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
                available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
            output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
                construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
                lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
                `engine.state` and `output_transform` inherit from the ignite concept:
                https://pytorch.org/ignite/concepts.html#state, explanation and usage example are in the tutorial:
                https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
            save_details: whether to save metric computation details per image, for example: mean squared error of every image.
                default to True, will save to `engine.state.metric_details` dict with the metric name as key.

        See also:
            :py:class:`monai.metrics.PSNRMetric`
        """
        metric_fn = PSNRMetric(max_val=max_val, reduction=reduction)
        super().__init__(metric_fn=metric_fn, output_transform=output_transform, save_details=save_details)
|
source_code/SegMamba/monai/handlers/roc_auc.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Callable
|
| 15 |
+
|
| 16 |
+
from monai.handlers.ignite_metric import IgniteMetricHandler
|
| 17 |
+
from monai.metrics import ROCAUCMetric
|
| 18 |
+
from monai.utils import Average
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ROCAUC(IgniteMetricHandler):
    """
    Ignite metric handler for the Area Under the Receiver Operating Characteristic
    Curve (ROC AUC): predictions and ground-truth are accumulated over an epoch and
    `compute_roc_auc` is applied at the end.

    Args:
        average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``}
            Type of averaging performed if not binary classification. Defaults to ``"macro"``.

            - ``"macro"``: calculate metrics for each label, and find their unweighted mean.
                This does not take label imbalance into account.
            - ``"weighted"``: calculate metrics for each label, and find their average,
                weighted by support (the number of true instances for each label).
            - ``"micro"``: calculate metrics globally by considering each element of the label
                indicator matrix as a label.
            - ``"none"``: the scores for each class are returned.

        output_transform: callable extracting the ``(y_pred, y)`` pair from
            ``ignite.engine.state.output`` as required by ``update()``; `y_pred`
            and `y` may be batch-first Tensors or lists of channel-first Tensors.
            See https://pytorch.org/ignite/concepts.html#state and
            https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.

    Note:
        ROCAUC expects y to be comprised of 0's and 1's.
        y_pred must either be probability estimates or confidence values.

    """

    def __init__(self, average: Average | str = Average.MACRO, output_transform: Callable = lambda x: x) -> None:
        # normalize `average` to the Average enum before handing it to the metric;
        # per-image details make no sense for a ranking metric, so save_details is off
        super().__init__(
            metric_fn=ROCAUCMetric(average=Average(average)),
            output_transform=output_transform,
            save_details=False,
        )
|
source_code/SegMamba/monai/handlers/smartcache_handler.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from typing import TYPE_CHECKING
|
| 15 |
+
|
| 16 |
+
from monai.config import IgniteInfo
|
| 17 |
+
from monai.data import SmartCacheDataset
|
| 18 |
+
from monai.utils import min_version, optional_import
|
| 19 |
+
|
| 20 |
+
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
|
| 21 |
+
if TYPE_CHECKING:
|
| 22 |
+
from ignite.engine import Engine
|
| 23 |
+
else:
|
| 24 |
+
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class SmartCacheHandler:
    """
    Wire a ``SmartCacheDataset``'s lifecycle into an Ignite engine:
    ``start()`` when the run starts, ``update_cache()`` after every epoch,
    and ``shutdown()`` when the run completes.
    """

    def __init__(self, smartcacher: SmartCacheDataset) -> None:
        """
        Args:
            smartcacher: predefined SmartCacheDataset, will attach it to the engine.

        Raises:
            TypeError: When ``smartcacher`` is not a ``monai.data.SmartCacheDataset``.

        """
        if not isinstance(smartcacher, SmartCacheDataset):
            raise TypeError("smartcacher must be a monai.data.SmartCacheDataset.")
        self.smartcacher = smartcacher

    def attach(self, engine: Engine) -> None:
        """
        Register the three lifecycle callbacks on the given engine.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        bindings = (
            (Events.STARTED, self.started),
            (Events.EPOCH_COMPLETED, self.epoch_completed),
            (Events.COMPLETED, self.completed),
        )
        for event, handler in bindings:
            engine.add_event_handler(event, handler)

    def started(self, engine: Engine) -> None:
        """Callback for the started Event: launch the SmartCacheDataset replacement thread.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        self.smartcacher.start()

    def epoch_completed(self, engine: Engine) -> None:
        """Callback for the epoch-completed Event: swap replacement data into the cache.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        self.smartcacher.update_cache()

    def completed(self, engine: Engine) -> None:
        """Callback for the completed Event: stop the SmartCacheDataset replacement thread.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        self.smartcacher.shutdown()
|
source_code/SegMamba/monai/handlers/stats_handler.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import logging
|
| 15 |
+
import warnings
|
| 16 |
+
from collections.abc import Callable, Sequence
|
| 17 |
+
from typing import TYPE_CHECKING, Any
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
|
| 21 |
+
from monai.apps import get_logger
|
| 22 |
+
from monai.config import IgniteInfo
|
| 23 |
+
from monai.utils import is_scalar, min_version, optional_import
|
| 24 |
+
|
| 25 |
+
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
|
| 26 |
+
if TYPE_CHECKING:
|
| 27 |
+
from ignite.engine import Engine
|
| 28 |
+
else:
|
| 29 |
+
Engine, _ = optional_import(
|
| 30 |
+
"ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator"
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
DEFAULT_KEY_VAL_FORMAT = "{}: {:.4f} "
|
| 34 |
+
DEFAULT_TAG = "Loss"
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class StatsHandler:
|
| 38 |
+
"""
|
| 39 |
+
StatsHandler defines a set of Ignite Event-handlers for all the log printing logics.
|
| 40 |
+
It can be used for any Ignite Engine(trainer, validator and evaluator).
|
| 41 |
+
And it can support logging for epoch level and iteration level with pre-defined loggers.
|
| 42 |
+
|
| 43 |
+
Note that if ``name`` is None, this class will leverage `engine.logger` as the logger, otherwise,
|
| 44 |
+
``logging.getLogger(name)`` is used. In both cases, it's important to make sure that the logging level is at least
|
| 45 |
+
``INFO``. To change the level of logging, please call ``import ignite; ignite.utils.setup_logger(name)``
|
| 46 |
+
(when ``name`` is not None) or ``engine.logger = ignite.utils.setup_logger(engine.logger.name, reset=True)``
|
| 47 |
+
(when ``name`` is None) before running the engine with this handler attached.
|
| 48 |
+
|
| 49 |
+
Default behaviors:
|
| 50 |
+
- When EPOCH_COMPLETED, logs ``engine.state.metrics`` using ``self.logger``.
|
| 51 |
+
- When ITERATION_COMPLETED, logs
|
| 52 |
+
``self.output_transform(engine.state.output)`` using ``self.logger``.
|
| 53 |
+
|
| 54 |
+
Usage example::
|
| 55 |
+
|
| 56 |
+
import ignite
|
| 57 |
+
import monai
|
| 58 |
+
|
| 59 |
+
trainer = ignite.engine.Engine(lambda x, y: [0.0]) # an example trainer
|
| 60 |
+
monai.handlers.StatsHandler(name="train_stats").attach(trainer)
|
| 61 |
+
|
| 62 |
+
trainer.run(range(3), max_epochs=4)
|
| 63 |
+
|
| 64 |
+
More details of example is available in the tutorial:
|
| 65 |
+
https://github.com/Project-MONAI/tutorials/blob/master/modules/engines/unet_training_dict.py.
|
| 66 |
+
|
| 67 |
+
"""
|
| 68 |
+
|
| 69 |
+
def __init__(
    self,
    iteration_log: bool | Callable[[Engine, int], bool] = True,
    epoch_log: bool | Callable[[Engine, int], bool] = True,
    epoch_print_logger: Callable[[Engine], Any] | None = None,
    iteration_print_logger: Callable[[Engine], Any] | None = None,
    output_transform: Callable = lambda x: x[0],
    global_epoch_transform: Callable = lambda x: x,
    state_attributes: Sequence[str] | None = None,
    name: str | None = "StatsHandler",
    tag_name: str = DEFAULT_TAG,
    key_var_format: str = DEFAULT_KEY_VAL_FORMAT,
) -> None:
    """
    Args:
        iteration_log: whether to log when an iteration completes (default `True`);
            may also be a callable ``(engine, iteration) -> bool`` used as an ignite
            event filter to control logging frequency
            (see https://pytorch.org/ignite/generated/ignite.engine.events.Events.html).
        epoch_log: whether to log when an epoch completes (default `True`); may also
            be a filter callable, see ``iteration_log``.
        epoch_print_logger: custom printer for epoch-level logging; must accept an
            ``engine`` argument. Uses the default printer when None.
        iteration_print_logger: custom printer for iteration-level logging; must
            accept an ``engine`` argument. Uses the default printer when None.
        output_transform: converts ``ignite.engine.state.output`` into a scalar (or a
            ``{key: scalar}`` dict) to print each iteration. The default takes
            ``output[0]`` because the output is a decollated list with the loss value
            replicated per item. See https://pytorch.org/ignite/concepts.html#state and
            https://github.com/Project-MONAI/tutorials/blob/master/modules/batch_output_transform.ipynb.
        global_epoch_transform: customizes the printed epoch number, e.g. to show the
            trainer's epoch from an evaluator engine.
        state_attributes: names of ``engine.state`` attributes to print when an epoch
            completes.
        name: identifier of the ``logging.logger`` to use; when None, ``engine.logger``
            is adopted in ``attach()``.
        tag_name: tag used when the iteration output is a scalar, printed as
            ``tag_name: scalar_value``. Defaults to ``'Loss'``.
        key_var_format: formatting string controlling the ``key: value`` output format.

    """
    # logging destination: a named logger now; swapped for engine.logger in attach() when name is None
    self.name = name
    self.logger = get_logger(name)  # type: ignore
    # event toggles / filters and optional custom printers
    self.iteration_log = iteration_log
    self.epoch_log = epoch_log
    self.epoch_print_logger = epoch_print_logger
    self.iteration_print_logger = iteration_print_logger
    # value extraction and formatting configuration
    self.output_transform = output_transform
    self.global_epoch_transform = global_epoch_transform
    self.state_attributes = state_attributes
    self.tag_name = tag_name
    self.key_var_format = key_var_format
|
| 129 |
+
|
| 130 |
+
def attach(self, engine: Engine) -> None:
    """
    Register a set of Ignite Event-Handlers to a specified Ignite engine.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.

    """
    # when no logger name was given, adopt the engine's own logger
    if self.name is None:
        self.logger = engine.logger
    # warn (with copy-pasteable setup code) if the effective level would filter out INFO logs
    if self.logger.getEffectiveLevel() > logging.INFO:
        suggested = f"\n\nimport ignite\nignite.utils.setup_logger('{self.logger.name}', reset=True)"
        if self.logger.name != engine.logger.name:
            suggested += f"\nignite.utils.setup_logger('{engine.logger.name}', reset=True)"
        suggested += "\n\n"
        warnings.warn(
            f"the effective log level of {self.logger.name} is higher than INFO, StatsHandler may not output logs,"
            f"\nplease use the following code before running the engine to enable it: {suggested}"
        )
    # register iteration logging unless already attached; a callable iteration_log
    # becomes an ignite event filter controlling logging frequency
    if self.iteration_log and not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
        event = Events.ITERATION_COMPLETED
        if callable(self.iteration_log):  # substitute event with new one using filter callable
            event = event(event_filter=self.iteration_log)
        engine.add_event_handler(event, self.iteration_completed)
    # same pattern for epoch logging
    if self.epoch_log and not engine.has_event_handler(self.epoch_completed, Events.EPOCH_COMPLETED):
        event = Events.EPOCH_COMPLETED
        if callable(self.epoch_log):  # substitute event with new one using filter callable
            event = event(event_filter=self.epoch_log)
        engine.add_event_handler(event, self.epoch_completed)
    # always log raised exceptions (unless a handler is already attached)
    if not engine.has_event_handler(self.exception_raised, Events.EXCEPTION_RAISED):
        engine.add_event_handler(Events.EXCEPTION_RAISED, self.exception_raised)
|
| 161 |
+
|
| 162 |
+
def epoch_completed(self, engine: Engine) -> None:
    """
    Handler for train or validation/evaluation epoch completed Event.
    Print epoch level log, default values are from Ignite `engine.state.metrics` dict.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.

    """
    # dispatch to the user-supplied printer when configured, else the built-in one
    printer = self._default_epoch_print if self.epoch_print_logger is None else self.epoch_print_logger
    printer(engine)
|
| 175 |
+
|
| 176 |
+
def iteration_completed(self, engine: Engine) -> None:
    """
    Handler for train or validation/evaluation iteration completed Event.
    Print iteration level log, default values are from Ignite `engine.state.output`.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.

    """
    # dispatch to the user-supplied printer when configured, else the built-in one
    printer = self._default_iteration_print if self.iteration_print_logger is None else self.iteration_print_logger
    printer(engine)
|
| 189 |
+
|
| 190 |
+
def exception_raised(self, _engine: Engine, e: Exception) -> None:
    """
    Handler for train or validation/evaluation exception raised Event.
    Print the exception information and traceback. This callback may be skipped because the logic
    with Ignite can only trigger the first attached handler for `EXCEPTION_RAISED` event.

    Args:
        _engine: Ignite Engine, unused argument.
        e: the exception caught in Ignite during engine.run().

    """
    # log with traceback, then re-raise so the engine still stops on the error
    self.logger.exception(f"Exception: {e}")
    raise e
|
| 203 |
+
|
| 204 |
+
def _default_epoch_print(self, engine: Engine) -> None:
    """
    Execute epoch level log operation.
    Default to print the values from Ignite `engine.state.metrics` dict and
    print the values of specified attributes of `engine.state`.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.

    """
    current_epoch = self.global_epoch_transform(engine.state.epoch)

    # one line with all metrics, sorted by name; scalars use key_var_format, others use str()
    prints_dict = engine.state.metrics
    if prints_dict is not None and len(prints_dict) > 0:
        out_str = f"Epoch[{current_epoch}] Metrics -- "
        for name in sorted(prints_dict):
            value = prints_dict[name]
            out_str += self.key_var_format.format(name, value) if is_scalar(value) else f"{name}: {str(value)}"
        self.logger.info(out_str)

    # report the best key metric so far, when the engine tracks one
    # (these attributes are set by MONAI workflow engines, not by plain ignite Engines)
    if (
        hasattr(engine.state, "key_metric_name")
        and hasattr(engine.state, "best_metric")
        and hasattr(engine.state, "best_metric_epoch")
        and engine.state.key_metric_name is not None
    ):
        out_str = f"Key metric: {engine.state.key_metric_name} "
        out_str += f"best value: {engine.state.best_metric} "
        out_str += f"at epoch: {engine.state.best_metric_epoch}"
        self.logger.info(out_str)

    # optionally print configured engine.state attributes (None when absent)
    if self.state_attributes is not None and len(self.state_attributes) > 0:
        out_str = "State values: "
        for attr in self.state_attributes:
            out_str += f"{attr}: {getattr(engine.state, attr, None)} "
        self.logger.info(out_str)
|
| 240 |
+
|
| 241 |
+
def _default_iteration_print(self, engine: Engine) -> None:
|
| 242 |
+
"""
|
| 243 |
+
Execute iteration log operation based on Ignite `engine.state.output` data.
|
| 244 |
+
Print the values from `self.output_transform(engine.state.output)`.
|
| 245 |
+
Since `engine.state.output` is a decollated list and we replicated the loss value for every item
|
| 246 |
+
of the decollated list, the default behavior is to print the loss from `output[0]`.
|
| 247 |
+
|
| 248 |
+
Args:
|
| 249 |
+
engine: Ignite Engine, it can be a trainer, validator or evaluator.
|
| 250 |
+
|
| 251 |
+
"""
|
| 252 |
+
loss = self.output_transform(engine.state.output)
|
| 253 |
+
if loss is None:
|
| 254 |
+
return # no printing if the output is empty
|
| 255 |
+
|
| 256 |
+
out_str = ""
|
| 257 |
+
if isinstance(loss, dict): # print dictionary items
|
| 258 |
+
for name in sorted(loss):
|
| 259 |
+
value = loss[name]
|
| 260 |
+
if not is_scalar(value):
|
| 261 |
+
warnings.warn(
|
| 262 |
+
"ignoring non-scalar output in StatsHandler,"
|
| 263 |
+
" make sure `output_transform(engine.state.output)` returns"
|
| 264 |
+
" a scalar or dictionary of key and scalar pairs to avoid this warning."
|
| 265 |
+
" {}:{}".format(name, type(value))
|
| 266 |
+
)
|
| 267 |
+
continue # not printing multi dimensional output
|
| 268 |
+
out_str += self.key_var_format.format(name, value.item() if isinstance(value, torch.Tensor) else value)
|
| 269 |
+
elif is_scalar(loss): # not printing multi dimensional output
|
| 270 |
+
out_str += self.key_var_format.format(
|
| 271 |
+
self.tag_name, loss.item() if isinstance(loss, torch.Tensor) else loss
|
| 272 |
+
)
|
| 273 |
+
else:
|
| 274 |
+
warnings.warn(
|
| 275 |
+
"ignoring non-scalar output in StatsHandler,"
|
| 276 |
+
" make sure `output_transform(engine.state.output)` returns"
|
| 277 |
+
" a scalar or a dictionary of key and scalar pairs to avoid this warning."
|
| 278 |
+
" {}".format(type(loss))
|
| 279 |
+
)
|
| 280 |
+
|
| 281 |
+
if not out_str:
|
| 282 |
+
return # no value to print
|
| 283 |
+
|
| 284 |
+
num_iterations = engine.state.epoch_length
|
| 285 |
+
current_iteration = engine.state.iteration
|
| 286 |
+
if num_iterations is not None:
|
| 287 |
+
current_iteration = (current_iteration - 1) % num_iterations + 1
|
| 288 |
+
current_epoch = engine.state.epoch
|
| 289 |
+
num_epochs = engine.state.max_epochs
|
| 290 |
+
|
| 291 |
+
base_str = f"Epoch: {current_epoch}/{num_epochs}, Iter: {current_iteration}/{num_iterations} --"
|
| 292 |
+
|
| 293 |
+
self.logger.info(" ".join([base_str, out_str]))
|
source_code/SegMamba/monai/handlers/utils.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
from collections import OrderedDict
|
| 16 |
+
from collections.abc import Callable, Sequence
|
| 17 |
+
from typing import TYPE_CHECKING, Any
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
|
| 22 |
+
from monai.config import IgniteInfo, KeysCollection, PathLike
|
| 23 |
+
from monai.utils import ensure_tuple, look_up_option, min_version, optional_import
|
| 24 |
+
|
| 25 |
+
idist, _ = optional_import("ignite", IgniteInfo.OPT_IMPORT_VERSION, min_version, "distributed")
|
| 26 |
+
if TYPE_CHECKING:
|
| 27 |
+
from ignite.engine import Engine
|
| 28 |
+
else:
|
| 29 |
+
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
|
| 30 |
+
|
| 31 |
+
__all__ = ["stopping_fn_from_metric", "stopping_fn_from_loss", "write_metrics_reports", "from_engine"]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def stopping_fn_from_metric(metric_name: str) -> Callable[[Engine], Any]:
    """
    Build a score function for ignite.handlers.EarlyStopping that reads the
    named entry from `engine.state.metrics`.
    """

    def stopping_fn(engine: Engine) -> Any:
        metrics = engine.state.metrics
        return metrics[metric_name]

    return stopping_fn
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def stopping_fn_from_loss() -> Callable[[Engine], Any]:
    """
    Build a score function for ignite.handlers.EarlyStopping that uses the
    negated loss value (EarlyStopping expects higher scores to be better).
    """

    def stopping_fn(engine: Engine) -> Any:
        score = -engine.state.output  # type:ignore
        return score

    return stopping_fn
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def write_metrics_reports(
    save_dir: PathLike,
    images: Sequence[str] | None,
    metrics: dict[str, torch.Tensor | np.ndarray] | None,
    metric_details: dict[str, torch.Tensor | np.ndarray] | None,
    summary_ops: str | Sequence[str] | None,
    deli: str = ",",
    output_type: str = "csv",
    class_labels: list[str] | None = None,
) -> None:
    """
    Utility function to write the metrics into files, contains 3 parts:
    1. if `metrics` dict is not None, write overall metrics into file, every line is a metric name and value pair.
    2. if `metric_details` dict is not None, write raw metric data of every image into file, every line for 1 image.
    3. if `summary_ops` is not None, compute summary based on operations on `metric_details` and write to file.

    Args:
        save_dir: directory to save all the metrics reports.
        images: name or path of every input image corresponding to the metric_details data.
            if None, will use index number as the filename of every input image.
        metrics: a dictionary of (metric name, metric value) pairs.
        metric_details: a dictionary of (metric name, metric raw values) pairs, usually, it comes from metrics
            computation, for example, the raw value can be the mean_dice of every channel of every input image.
        summary_ops: expected computation operations to generate the summary report.
            it can be: None, "*" or list of strings, default to None.
            None - don't generate summary report for every expected metric_details.
            "*" - generate summary report for every metric_details with all the supported operations.
            list of strings - generate summary report for every metric_details with specified operations, they
            should be within list: ["mean", "median", "max", "min", "<int>percentile", "std", "notnans"].
            the number in "<int>percentile" should be [0, 100], like: "15percentile". default: "90percentile".
            for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.
            note that: for the overall summary, it computes `nanmean` of all classes for each image first,
            then compute summary. example of the generated summary report::

                class    mean    median    max    5percentile 95percentile  notnans
                class0  6.0000   6.0000   7.0000   5.1000      6.9000       2.0000
                class1  6.0000   6.0000   6.0000   6.0000      6.0000       1.0000
                mean    6.2500   6.2500   7.0000   5.5750      6.9250       2.0000

        deli: the delimiter character in the saved file, default to "," as the default output type is `csv`.
            to be consistent with: https://docs.python.org/3/library/csv.html#csv.Dialect.delimiter.
        output_type: expected output file type, supported types: ["csv"], default to "csv".
        class_labels: list of class names used to name the classes in the output report, if None,
            "class0", ..., "classn" are used, default to None.

    Raises:
        ValueError: when ``output_type`` is not "csv".

    """
    if output_type.lower() != "csv":
        raise ValueError(f"unsupported output type: {output_type}.")

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    if metrics is not None and len(metrics) > 0:
        with open(os.path.join(save_dir, "metrics.csv"), "w") as f:
            for k, v in metrics.items():
                f.write(f"{k}{deli}{str(v)}\n")

    if metric_details is not None and len(metric_details) > 0:
        for k, v in metric_details.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu().numpy()
            if v.ndim == 0:
                # reshape to [1, 1] if no batch and class dims
                v = v.reshape((1, 1))
            elif v.ndim == 1:
                # reshape to [N, 1] if no class dim
                v = v.reshape((-1, 1))

            # BUGFIX: compute per-metric labels into a local list. The previous code
            # reassigned the `class_labels` parameter inside this loop, so for a second
            # metric key another "mean" was appended and the CSV header no longer
            # matched the data columns.
            if class_labels is None:
                labels = ["class" + str(i) for i in range(v.shape[1])]
            else:
                labels = [str(i) for i in class_labels]  # ensure to have a list of str
            labels += ["mean"]
            # append the per-image average over all classes as an extra column
            v = np.concatenate([v, np.nanmean(v, axis=1, keepdims=True)], axis=1)

            with open(os.path.join(save_dir, f"{k}_raw.csv"), "w") as f:
                f.write(f"filename{deli}{deli.join(labels)}\n")
                for i, b in enumerate(v):
                    f.write(
                        f"{images[i] if images is not None else str(i)}{deli}"
                        f"{deli.join([f'{c:.4f}' if isinstance(c, (int, float)) else str(c) for c in b])}\n"
                    )

            if summary_ops is not None:
                supported_ops = OrderedDict(
                    {
                        "mean": np.nanmean,
                        "median": np.nanmedian,
                        "max": np.nanmax,
                        "min": np.nanmin,
                        "90percentile": lambda x: np.nanpercentile(x[0], x[1]),
                        "std": np.nanstd,
                        "notnans": lambda x: (~np.isnan(x)).sum(),
                    }
                )
                ops = ensure_tuple(summary_ops)
                if "*" in ops:
                    ops = tuple(supported_ops.keys())

                def _compute_op(op: str, d: np.ndarray) -> Any:
                    # any "<int>percentile" op is routed through the percentile lambda
                    if not op.endswith("percentile"):
                        c_op = look_up_option(op, supported_ops)
                        return c_op(d)

                    threshold = int(op.split("percentile")[0])
                    return supported_ops["90percentile"]((d, threshold))  # type: ignore

                with open(os.path.join(save_dir, f"{k}_summary.csv"), "w") as f:
                    f.write(f"class{deli}{deli.join(ops)}\n")
                    for i, c in enumerate(np.transpose(v)):
                        f.write(f"{labels[i]}{deli}{deli.join([f'{_compute_op(op, c):.4f}' for op in ops])}\n")
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def from_engine(keys: KeysCollection, first: bool = False) -> Callable:
    """
    Utility function to simplify the `batch_transform` or `output_transform` args of ignite components
    when handling dictionary or list of dictionaries(for example: `engine.state.batch` or `engine.state.output`).
    Users only need to set the expected keys, then it will return a callable function to extract data from
    dictionary and construct a tuple respectively.

    If data is a list of dictionaries after decollating, extract expected keys and construct lists respectively,
    for example, if data is `[{"A": 1, "B": 2}, {"A": 3, "B": 4}]`, from_engine(["A", "B"]): `([1, 3], [2, 4])`.

    It can help avoid a complicated `lambda` function and make the arg of metrics more straight-forward.
    For example, set the first key as the prediction and the second key as label to get the expected data
    from `engine.state.output` for a metric::

        from monai.handlers import MeanDice, from_engine

        metric = MeanDice(
            include_background=False,
            output_transform=from_engine(["pred", "label"])
        )

    Args:
        keys: specified keys to extract data from dictionary or decollated list of dictionaries.
        first: whether only extract specified keys from the first item if input data is a list of dictionaries,
            it's used to extract the scalar data which doesn't have batch dim and was replicated into every
            dictionary when decollating, like `loss`, etc.

    """
    _keys = ensure_tuple(keys)

    def _wrapper(data):
        if isinstance(data, dict):
            return tuple(data[k] for k in _keys)
        if isinstance(data, list) and isinstance(data[0], dict):
            # decollated list of dicts: gather each key across all items,
            # or from the first item only when `first=True`
            extracted = []
            for k in _keys:
                extracted.append(data[0][k] if first else [item[k] for item in data])
            return tuple(extracted) if len(extracted) > 1 else extracted[0]

    return _wrapper
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def ignore_data(x: Any) -> None:
    """
    Return `None` regardless of the input data.
    A typical usage is to avoid logging the engine output of every iteration during evaluation.

    """
    return None
|
source_code/SegMamba/monai/handlers/validation_handler.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from typing import TYPE_CHECKING
|
| 15 |
+
|
| 16 |
+
from monai.config import IgniteInfo
|
| 17 |
+
from monai.engines.evaluator import Evaluator
|
| 18 |
+
from monai.utils import min_version, optional_import
|
| 19 |
+
|
| 20 |
+
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
|
| 21 |
+
if TYPE_CHECKING:
|
| 22 |
+
from ignite.engine import Engine
|
| 23 |
+
else:
|
| 24 |
+
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class ValidationHandler:
    """
    Run a validator (Evaluator) periodically while an Ignite trainer runs.
    Validation can be triggered every N epochs or every N iterations.

    """

    def __init__(
        self, interval: int, validator: Evaluator | None = None, epoch_level: bool = True, exec_at_start: bool = False
    ) -> None:
        """
        Args:
            interval: do validation every N epochs or every N iterations during training.
            validator: run the validator when trigger validation, suppose to be Evaluator.
                if None, should call `set_validator()` before training.
            epoch_level: execute validation every N epochs or N iterations.
                `True` is epoch level, `False` is iteration level.
            exec_at_start: whether to execute a validation first when starting the training.
                default to `False`. It can be useful especially for some transfer-learning cases
                to validate the initial model before training.

        Raises:
            TypeError: When ``validator`` is not a ``monai.engines.evaluator.Evaluator``.

        """
        if validator is not None:
            self._ensure_evaluator(validator)
        self.validator = validator
        self.interval = interval
        self.epoch_level = epoch_level
        self.exec_at_start = exec_at_start

    @staticmethod
    def _ensure_evaluator(validator: Evaluator) -> None:
        # shared type guard for `__init__` and `set_validator`
        if not isinstance(validator, Evaluator):
            raise TypeError(f"validator must be a monai.engines.evaluator.Evaluator but is {type(validator).__name__}.")

    def set_validator(self, validator: Evaluator) -> None:
        """
        Set validator if not setting in the __init__().
        """
        self._ensure_evaluator(validator)
        self.validator = validator

    def attach(self, engine: Engine) -> None:
        """
        Register this handler on the trainer at the configured interval.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        event = Events.EPOCH_COMPLETED if self.epoch_level else Events.ITERATION_COMPLETED
        engine.add_event_handler(event(every=self.interval), self)
        if self.exec_at_start:
            engine.add_event_handler(Events.STARTED, self)

    def __call__(self, engine: Engine) -> None:
        """
        Trigger one run of the validator, synced to the trainer's epoch counter.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        if self.validator is None:
            raise RuntimeError("please set validator in __init__() or call `set_validator()` before training.")
        self.validator.run(engine.state.epoch)
|
source_code/SegMamba/monai/inferers/merger.py
ADDED
|
@@ -0,0 +1,381 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import threading
|
| 15 |
+
from abc import ABC, abstractmethod
|
| 16 |
+
from collections.abc import Sequence
|
| 17 |
+
from contextlib import nullcontext
|
| 18 |
+
from typing import TYPE_CHECKING, Any
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import torch
|
| 22 |
+
|
| 23 |
+
from monai.utils import ensure_tuple_size, optional_import, require_pkg
|
| 24 |
+
|
| 25 |
+
if TYPE_CHECKING:
|
| 26 |
+
import zarr
|
| 27 |
+
else:
|
| 28 |
+
zarr, _ = optional_import("zarr")
|
| 29 |
+
|
| 30 |
+
__all__ = ["Merger", "AvgMerger", "ZarrAvgMerger"]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Merger(ABC):
    """
    Abstract base class for patch-merging strategies.
    Extend this class to support operations for `PatchInference`.
    Concrete subclasses must implement two methods:

    - aggregate: aggregate the values at their corresponding locations
    - finalize: perform any final process and return the merged output

    Args:
        merged_shape: the shape of the tensor required to merge the patches.
        cropped_shape: the shape of the final merged output tensor.
            If not provided, it will be the same as `merged_shape`.
        device: the device where Merger tensors should reside.
    """

    def __init__(
        self,
        merged_shape: Sequence[int],
        cropped_shape: Sequence[int] | None = None,
        device: torch.device | str | None = None,
    ) -> None:
        self.merged_shape = merged_shape
        # default the cropped shape to the full merged shape when not given
        if cropped_shape is None:
            self.cropped_shape = self.merged_shape
        else:
            self.cropped_shape = cropped_shape
        self.device = device
        self.is_finalized = False

    @abstractmethod
    def aggregate(self, values: torch.Tensor, location: Sequence[int]) -> None:
        """
        Accumulate `values` into the merged result at the given location.
        This method is called in a loop, once per inferred patch.

        Args:
            values: a tensor of shape BCHW[D], representing the values of inference output.
            location: a tuple/list giving the top left location of the patch in the output.

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")

    @abstractmethod
    def finalize(self) -> Any:
        """
        Perform final operations for merging patches and return the final merged output.

        Returns:
            The results of merged patches, which is commonly a torch.Tensor representing the merged result, or
            a string representing the filepath to the merged results on disk.

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class AvgMerger(Merger):
    """Merge patches by taking average of the overlapping area

    Args:
        merged_shape: the shape of the tensor required to merge the patches.
        cropped_shape: the shape of the final merged output tensor.
            If not provided, it will be the same as `merged_shape`.
        device: the device for aggregator tensors and final results.
        value_dtype: the dtype for value aggregating tensor and the final result.
        count_dtype: the dtype for sample counting tensor.
    """

    def __init__(
        self,
        merged_shape: Sequence[int],
        cropped_shape: Sequence[int] | None = None,
        value_dtype: torch.dtype = torch.float32,
        count_dtype: torch.dtype = torch.uint8,
        device: torch.device | str = "cpu",
    ) -> None:
        super().__init__(merged_shape=merged_shape, cropped_shape=cropped_shape, device=device)
        if not self.merged_shape:
            # fixed grammar of the error message ("is give." -> "is given.")
            raise ValueError(f"`merged_shape` must be provided for `AvgMerger`. {self.merged_shape} is given.")
        self.value_dtype = value_dtype
        self.count_dtype = count_dtype
        self.values = torch.zeros(self.merged_shape, dtype=self.value_dtype, device=self.device)
        # NOTE(review): with the default uint8 dtype the per-location count can overflow
        # past 255 overlapping patches; pass a wider `count_dtype` for very dense overlaps.
        self.counts = torch.zeros(self.merged_shape, dtype=self.count_dtype, device=self.device)

    def aggregate(self, values: torch.Tensor, location: Sequence[int]) -> None:
        """
        Aggregate values for merging.

        Args:
            values: a tensor of shape BCHW[D], representing the values of inference output.
            location: a tuple/list giving the top left location of the patch in the original image.

        Raises:
            ValueError: When called after `finalize()` (the buffers are already averaged in-place).

        """
        if self.is_finalized:
            raise ValueError("`AvgMerger` is already finalized. Please instantiate a new object to aggregate.")
        patch_size = values.shape[2:]
        # build spatial slices from the patch location, padded with full slices
        # for the leading batch/channel dims
        map_slice = tuple(slice(loc, loc + size) for loc, size in zip(location, patch_size))
        map_slice = ensure_tuple_size(map_slice, values.ndim, pad_val=slice(None), pad_from_start=True)
        self.values[map_slice] += values
        self.counts[map_slice] += 1

    def finalize(self) -> torch.Tensor:
        """
        Finalize merging by dividing values by counts and return the merged tensor.

        Notes:
            To avoid creating a new tensor for the final results (to save memory space),
            after this method is called, `get_values()` method will return the "final" averaged values,
            and not the accumulating values. Also calling `finalize()` multiple times does not have any effect.

        Returns:
            torch.tensor: a tensor of merged patches
        """
        # guard against multiple call to finalize
        if not self.is_finalized:
            # use in-place division to save space
            self.values.div_(self.counts)
            # crop the accumulated buffer down to the requested output shape
            self.values = self.values[tuple(slice(0, end) for end in self.cropped_shape)]
            # set finalize flag to protect performing in-place division again
            self.is_finalized = True

        return self.values

    def get_output(self) -> torch.Tensor:
        """
        Get the final merged output.

        Returns:
            torch.Tensor: merged output.
        """
        return self.finalize()

    def get_values(self) -> torch.Tensor:
        """
        Get the accumulated values during aggregation or final averaged values after it is finalized.

        Returns:
            torch.tensor: aggregated values.

        Notes:
            - If called before calling `finalize()`, this method returns the accumulating values.
            - If called after calling `finalize()`, this method returns the final merged [and averaged] values.
        """
        return self.values

    def get_counts(self) -> torch.Tensor:
        """
        Get the aggregator tensor for number of samples.

        Returns:
            torch.Tensor: number of accumulated samples at each location.
        """
        return self.counts
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
@require_pkg(pkg_name="zarr")
|
| 196 |
+
class ZarrAvgMerger(Merger):
|
| 197 |
+
"""Merge patches by taking average of the overlapping area and store the results in zarr array.
|
| 198 |
+
|
| 199 |
+
Zarr is a format for the storage of chunked, compressed, N-dimensional arrays.
|
| 200 |
+
Zarr data can be stored in any storage system that can be represented as a key-value store,
|
| 201 |
+
like POSIX file systems, cloud object storage, zip files, and relational and document databases.
|
| 202 |
+
See https://zarr.readthedocs.io/en/stable/ for more details.
|
| 203 |
+
It is particularly useful for storing N-dimensional arrays too large to fit into memory.
|
| 204 |
+
One specific use case of this class is to merge patches extracted from whole slide images (WSI),
|
| 205 |
+
where the merged results do not fit into memory and need to be stored on a file system.
|
| 206 |
+
|
| 207 |
+
Args:
|
| 208 |
+
merged_shape: the shape of the tensor required to merge the patches.
|
| 209 |
+
cropped_shape: the shape of the final merged output tensor.
|
| 210 |
+
If not provided, it will be the same as `merged_shape`.
|
| 211 |
+
dtype: the dtype for the final merged result. Default is `float32`.
|
| 212 |
+
value_dtype: the dtype for value aggregating tensor and the final result. Default is `float32`.
|
| 213 |
+
count_dtype: the dtype for sample counting tensor. Default is `uint8`.
|
| 214 |
+
store: the zarr store to save the final results. Default is "merged.zarr".
|
| 215 |
+
value_store: the zarr store to save the value aggregating tensor. Default is a temporary store.
|
| 216 |
+
count_store: the zarr store to save the sample counting tensor. Default is a temporary store.
|
| 217 |
+
compressor: the compressor for final merged zarr array. Default is "default".
|
| 218 |
+
value_compressor: the compressor for value aggregating zarr array. Default is None.
|
| 219 |
+
count_compressor: the compressor for sample counting zarr array. Default is None.
|
| 220 |
+
chunks : int or tuple of ints that defines the chunk shape, or boolean. Default is True.
|
| 221 |
+
If True, chunk shape will be guessed from `shape` and `dtype`.
|
| 222 |
+
If False, it will be set to `shape`, i.e., single chunk for the whole array.
|
| 223 |
+
If an int, the chunk size in each dimension will be given by the value of `chunks`.
|
| 224 |
+
"""
|
| 225 |
+
|
| 226 |
+
def __init__(
|
| 227 |
+
self,
|
| 228 |
+
merged_shape: Sequence[int],
|
| 229 |
+
cropped_shape: Sequence[int] | None = None,
|
| 230 |
+
dtype: np.dtype | str = "float32",
|
| 231 |
+
value_dtype: np.dtype | str = "float32",
|
| 232 |
+
count_dtype: np.dtype | str = "uint8",
|
| 233 |
+
store: zarr.storage.Store | str = "merged.zarr",
|
| 234 |
+
value_store: zarr.storage.Store | str | None = None,
|
| 235 |
+
count_store: zarr.storage.Store | str | None = None,
|
| 236 |
+
compressor: str = "default",
|
| 237 |
+
value_compressor: str | None = None,
|
| 238 |
+
count_compressor: str | None = None,
|
| 239 |
+
chunks: Sequence[int] | bool = True,
|
| 240 |
+
thread_locking: bool = True,
|
| 241 |
+
) -> None:
|
| 242 |
+
super().__init__(merged_shape=merged_shape, cropped_shape=cropped_shape)
|
| 243 |
+
if not self.merged_shape:
|
| 244 |
+
raise ValueError(f"`merged_shape` must be provided for `ZarrAvgMerger`. {self.merged_shape} is give.")
|
| 245 |
+
self.output_dtype = dtype
|
| 246 |
+
self.value_dtype = value_dtype
|
| 247 |
+
self.count_dtype = count_dtype
|
| 248 |
+
self.store = store
|
| 249 |
+
self.value_store = zarr.storage.TempStore() if value_store is None else value_store
|
| 250 |
+
self.count_store = zarr.storage.TempStore() if count_store is None else count_store
|
| 251 |
+
self.chunks = chunks
|
| 252 |
+
self.compressor = compressor
|
| 253 |
+
self.value_compressor = value_compressor
|
| 254 |
+
self.count_compressor = count_compressor
|
| 255 |
+
self.output = zarr.empty(
|
| 256 |
+
shape=self.merged_shape,
|
| 257 |
+
chunks=self.chunks,
|
| 258 |
+
dtype=self.output_dtype,
|
| 259 |
+
compressor=self.compressor,
|
| 260 |
+
store=self.store,
|
| 261 |
+
overwrite=True,
|
| 262 |
+
)
|
| 263 |
+
self.values = zarr.zeros(
|
| 264 |
+
shape=self.merged_shape,
|
| 265 |
+
chunks=self.chunks,
|
| 266 |
+
dtype=self.value_dtype,
|
| 267 |
+
compressor=self.value_compressor,
|
| 268 |
+
store=self.value_store,
|
| 269 |
+
overwrite=True,
|
| 270 |
+
)
|
| 271 |
+
self.counts = zarr.zeros(
|
| 272 |
+
shape=self.merged_shape,
|
| 273 |
+
chunks=self.chunks,
|
| 274 |
+
dtype=self.count_dtype,
|
| 275 |
+
compressor=self.count_compressor,
|
| 276 |
+
store=self.count_store,
|
| 277 |
+
overwrite=True,
|
| 278 |
+
)
|
| 279 |
+
self.lock: threading.Lock | nullcontext
|
| 280 |
+
if thread_locking:
|
| 281 |
+
# use lock to protect the in-place addition during aggregation
|
| 282 |
+
self.lock = threading.Lock()
|
| 283 |
+
else:
|
| 284 |
+
# use nullcontext to avoid the locking if not needed
|
| 285 |
+
self.lock = nullcontext()
|
| 286 |
+
|
| 287 |
+
def aggregate(self, values: torch.Tensor, location: Sequence[int]) -> None:
|
| 288 |
+
"""
|
| 289 |
+
Aggregate values for merging.
|
| 290 |
+
|
| 291 |
+
Args:
|
| 292 |
+
values: a tensor of shape BCHW[D], representing the values of inference output.
|
| 293 |
+
location: a tuple/list giving the top left location of the patch in the original image.
|
| 294 |
+
"""
|
| 295 |
+
if self.is_finalized:
|
| 296 |
+
raise ValueError("`ZarrAvgMerger` is already finalized. Please instantiate a new object to aggregate.")
|
| 297 |
+
patch_size = values.shape[2:]
|
| 298 |
+
map_slice = tuple(slice(loc, loc + size) for loc, size in zip(location, patch_size))
|
| 299 |
+
map_slice = ensure_tuple_size(map_slice, values.ndim, pad_val=slice(None), pad_from_start=True)
|
| 300 |
+
with self.lock:
|
| 301 |
+
self.values[map_slice] += values.numpy()
|
| 302 |
+
self.counts[map_slice] += 1
|
| 303 |
+
|
| 304 |
+
def finalize(self) -> zarr.Array:
|
| 305 |
+
"""
|
| 306 |
+
Finalize merging by dividing values by counts and return the merged tensor.
|
| 307 |
+
|
| 308 |
+
Notes:
|
| 309 |
+
To avoid creating a new tensor for the final results (to save memory space),
|
| 310 |
+
after this method is called, `get_values()` method will return the "final" averaged values,
|
| 311 |
+
and not the accumulating values. Also calling `finalize()` multiple times does not have any effect.
|
| 312 |
+
|
| 313 |
+
Returns:
|
| 314 |
+
zarr.Array: a zarr array of of merged patches
|
| 315 |
+
"""
|
| 316 |
+
# guard against multiple calls to finalize
|
| 317 |
+
if not self.is_finalized:
|
| 318 |
+
# use chunks for division to fit into memory
|
| 319 |
+
for chunk in iterate_over_chunks(self.values.chunks, self.values.cdata_shape):
|
| 320 |
+
self.output[chunk] = self.values[chunk] / self.counts[chunk]
|
| 321 |
+
# finalize the shape
|
| 322 |
+
self.output.resize(self.cropped_shape)
|
| 323 |
+
# set finalize flag to protect performing in-place division again
|
| 324 |
+
self.is_finalized = True
|
| 325 |
+
|
| 326 |
+
return self.output
|
| 327 |
+
|
| 328 |
+
def get_output(self) -> zarr.Array:
|
| 329 |
+
"""
|
| 330 |
+
Get the final merged output.
|
| 331 |
+
|
| 332 |
+
Returns:
|
| 333 |
+
zarr.Array: Merged (averaged) output tensor.
|
| 334 |
+
"""
|
| 335 |
+
return self.output
|
| 336 |
+
|
| 337 |
+
def get_values(self) -> zarr.Array:
|
| 338 |
+
"""
|
| 339 |
+
Get the accumulated values during aggregation
|
| 340 |
+
|
| 341 |
+
Returns:
|
| 342 |
+
zarr.Array: aggregated values.
|
| 343 |
+
|
| 344 |
+
"""
|
| 345 |
+
return self.values
|
| 346 |
+
|
| 347 |
+
def get_counts(self) -> zarr.Array:
|
| 348 |
+
"""
|
| 349 |
+
Get the aggregator tensor for number of samples.
|
| 350 |
+
|
| 351 |
+
Returns:
|
| 352 |
+
zarr.Array: Number of accumulated samples at each location.
|
| 353 |
+
"""
|
| 354 |
+
return self.counts
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
def iterate_over_chunks(chunks, cdata_shape, slice_tuple=()):
    """
    Recursively yield index tuples covering an array chunk by chunk.

    Args:
        chunks: the chunk shape
        cdata_shape: the shape of the data in chunks
        slice_tuple: the slice tuple to be used for indexing

    Raises:
        ValueError: When the length of chunks and cdata_shape are not the same.

    Yields:
        slices of the data
    """
    if len(chunks) != len(cdata_shape):
        raise ValueError("chunks and cdata_shape must have the same length")
    step = chunks[0]
    last_dim = len(chunks) == 1
    for idx in range(cdata_shape[0]):
        prefix = slice_tuple + (slice(idx * step, (idx + 1) * step),)
        if last_dim:
            yield prefix
        else:
            # recurse into the remaining dimensions with the current prefix
            yield from iterate_over_chunks(chunks[1:], cdata_shape[1:], prefix)
|
source_code/SegMamba/monai/inferers/splitter.py
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
import warnings
|
| 16 |
+
from abc import ABC, abstractmethod
|
| 17 |
+
from collections.abc import Callable, Iterable, Sequence
|
| 18 |
+
from inspect import _empty, isclass, signature
|
| 19 |
+
from typing import Any
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
|
| 23 |
+
from monai.data.utils import iter_patch_position
|
| 24 |
+
from monai.data.wsi_reader import BaseWSIReader, WSIReader
|
| 25 |
+
from monai.transforms.utility.array import convert_to_tensor
|
| 26 |
+
from monai.utils.misc import PathLike, ensure_tuple, ensure_tuple_rep
|
| 27 |
+
|
| 28 |
+
__all__ = ["Splitter", "SlidingWindowSplitter", "WSISlidingWindowSplitter"]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class Splitter(ABC):
    """
    Abstract base class for splitting inputs into an iterable of (patch, location) pairs.

    Subclasses provide the splitting strategies used by `PatchInference`,
    e.g. SlidingPatchSplitter.

    Args:
        patch_size: the size of patches to be generated.
        device: the device where the patches are generated.
    """

    def __init__(self, patch_size: Sequence[int] | int, device: torch.device | str | None = None) -> None:
        self.patch_size = patch_size
        self.device = device

    @abstractmethod
    def get_input_shape(self, inputs: Any) -> tuple:
        """
        Return the spatial shape of the input.

        Args:
            inputs: either a tensor of shape BCHW[D], representing a batch of images,
                or a filename (str) or list of filenames to the image(s).

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")

    @abstractmethod
    def get_padded_shape(self, inputs: Any) -> tuple:
        """
        Return the spatial shape actually covered by the emitted patches.
        When the input image is padded, this shape is enlarged and differs
        from the raw input spatial shape.

        Args:
            inputs: either a tensor of shape BCHW[D], representing a batch of images,
                or a filename (str) or list of filenames to the image(s).

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")

    @abstractmethod
    def __call__(self, inputs: Any) -> Iterable[tuple[torch.Tensor, Sequence[int]]]:
        """
        Split an image (or a batch of images) into patches, yielding (patch, location)
        pairs where `location` is the top left [front] corner of the patch.

        Args:
            inputs: either a tensor of shape BCHW[D], representing a batch of images,
                or a filename (str) or list of filenames to the image(s).

        Raises:
            NotImplementedError: When the subclass does not override this method.

        """
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class SlidingWindowSplitter(Splitter):
    """
    Splits the input into patches with sliding window strategy and a possible overlap.
    It also allows offsetting the starting position and filtering the patches.

    Args:
        patch_size : the size of the patches to be generated.
        offset: the amount of offset for the patches with respect to the original input. Defaults to 0.
        overlap: the amount of overlap between patches in each dimension. It can be either a float in
            the range of [0.0, 1.0) that defines relative overlap to the patch size, or it can be a non-negative int
            that defines number of pixels for overlap. Defaults to 0.0.
        filter_fn: a callable to filter patches. It should accept at least two parameters (patch, location),
            with at most two of them positional, and return True for a patch to keep. Defaults to no filtering.
        pad_mode: string defining the mode for `torch.nn.functional.pad`. The acceptable values are
            `"constant"`, `"reflect"`, `"replicate"`, `"circular"` or `None`. Default to `"constant"`.
            If None, no padding will be applied, so it will drop the patches crossing the border of
            the image (either when the offset is negative or the image is non-divisible by the patch_size).
        pad_value: the value for `"constant"` padding. Defaults to 0.
        device: the device where the patches are generated. Defaults to the device of inputs.

    Note:
        When a scalar value is provided for `patch_size`, `offset`, or `overlap`,
        it is broadcasted to all the spatial dimensions.
    """

    def __init__(
        self,
        patch_size: Sequence[int] | int,
        overlap: Sequence[float] | float | Sequence[int] | int = 0.0,
        offset: Sequence[int] | int = 0,
        filter_fn: Callable | None = None,
        pad_mode: str | None = "constant",
        pad_value: float | int = 0,
        device: torch.device | str | None = None,
    ) -> None:
        super().__init__(patch_size=patch_size, device=device)
        self.offset = offset
        # check if fraction overlaps are within the range of [0, 1)
        if isinstance(ensure_tuple(overlap)[0], float) and any(ov < 0.0 or ov >= 1.0 for ov in ensure_tuple(overlap)):
            raise ValueError(
                f"Relative overlap must be between 0.0 and 1.0 but {overlap} is given. "
                "If you wish to use number of pixels as overlap, please provide integer numbers."
            )
        elif any(ov < 0 for ov in ensure_tuple(overlap)):
            raise ValueError(f"Number of pixels for overlap cannot be negative. {overlap} is given. ")

        self.overlap = overlap
        self.filter_fn = self._validate_filter_fn(filter_fn)
        # padding
        self.pad_mode = pad_mode
        self.pad_value = pad_value
        # check a valid padding mode is provided if there is any negative offset.
        if not self.pad_mode and any(off < 0 for off in ensure_tuple(offset)):
            # fixed message typo: missing space after `offset`
            raise ValueError(f"Negative `offset` requires a valid padding mode but `mode` is set to {self.pad_mode}.")

    @staticmethod
    def _validate_filter_fn(filter_fn):
        # ensure the filter is a callable that accepts (patch, location): at least two
        # parameters overall and at most two without defaults (positional).
        if callable(filter_fn):
            sig = signature(filter_fn)
            n_params = len(sig.parameters)
            num_pos_params = len([v for v in sig.parameters.values() if v.default is _empty])
            if n_params < 2:
                # fixed message: added separating space between the two sentences
                raise ValueError(
                    f"`filter_fn` requires to accept at least two parameters (patch, location). "
                    f"The provided callable ({filter_fn}) has {n_params} parameters."
                )
            elif num_pos_params > 2:
                # fixed message: added separating space between the two sentences
                raise ValueError(
                    f"`filter_fn` can have at most two positional parameters (patch, location). "
                    f"The provided callable ({filter_fn}) has {num_pos_params} positional parameters."
                )
        elif filter_fn is not None:
            raise ValueError(
                "`filter_fn` should be a callable with two input parameters (patch, location). "
                f"{type(filter_fn)} is given."
            )
        return filter_fn

    def _calculate_pad_size(self, spatial_shape, spatial_ndim, patch_size, offset, overlap):
        """Compute the pad amounts (end/start interleaved per spatial dim) and whether any start padding applies."""
        # initialize with zero
        pad_size = [0] * 2 * spatial_ndim
        if not self.pad_mode:
            return pad_size, False
        # set the starting pad size only if the offset is negative
        pad_size[1::2] = (-min(off, 0) for off in offset)
        # set the ending pad size only if it is not divisible by the patch size
        end_padding = []
        for sh, off, ps, ov in zip(spatial_shape, offset, patch_size, overlap):
            if ps == 0:
                pad_amount = 0
            else:
                # float overlap is relative to the patch size; int overlap is in pixels
                if isinstance(ov, float):
                    pad_amount = (off - sh + ps) % round(ps - (ps * ov))
                else:
                    pad_amount = (off - sh + ps) % round(ps - ov)
            end_padding.append(pad_amount)

        pad_size[::2] = end_padding
        return pad_size, any(pad_size[1::2])

    def _get_valid_shape_parameters(
        self, spatial_shape: Sequence[int]
    ) -> tuple[tuple[int, ...], tuple[float, ...] | tuple[int, ...], tuple[int, ...]]:
        """Broadcast and validate `patch_size`, `overlap`, and `offset` against the input spatial shape."""
        spatial_ndim = len(spatial_shape)
        # patch_size
        patch_size = ensure_tuple_rep(self.patch_size, spatial_ndim)
        # overlap: force zero overlap for any dimension with zero patch size
        overlap = ensure_tuple_rep(self.overlap, spatial_ndim)
        overlap = tuple(o if p else type(overlap[0])(0) for o, p in zip(overlap, patch_size))
        if any(ov > ps for ov, ps in zip(overlap, patch_size)):
            raise ValueError(f"`overlap` ({overlap}) cannot be larger than patch size ({patch_size}).")
        # offset
        offset = ensure_tuple_rep(self.offset, spatial_ndim)
        for off, ps, sh in zip(offset, patch_size, spatial_shape):
            if off < -ps:
                raise ValueError(f"Negative `offset` ({off}) cannot be larger than `patch_size` ({ps}) in magnitude.")
            if off >= sh:
                raise ValueError(f"`offset` ({off}) cannot be larger than inputs size ({sh}).")
        return patch_size, overlap, offset

    def _get_patch(self, inputs: Any, location: tuple[int, ...], patch_size: tuple[int, ...]) -> Any:
        # keep batch and channel dims intact, slice only the spatial dims
        slices = (slice(None),) * 2 + tuple(slice(loc, loc + ps) for loc, ps in zip(location, patch_size))
        return inputs[slices]

    def get_input_shape(self, inputs: Any) -> tuple:
        """
        Return the input spatial shape.

        Args:
            inputs: either a tensor of shape BCHW[D], representing a batch of images,
                or a filename (str) or list of filenames to the image(s).

        Returns:
            spatial_shape
        """
        return tuple(inputs.shape[2:])

    def get_padded_shape(self, inputs: Any) -> tuple:
        """
        Return the actual spatial shape covered by the output split patches.
        For instance, if the input image is padded, the actual spatial shape will be enlarged
        and not the same as input spatial shape.

        Args:
            inputs: either a tensor of shape BCHW[D], representing a batch of images,
                or a filename (str) or list of filenames to the image(s).

        Returns:
            padded_spatial_shape

        """
        spatial_shape = self.get_input_shape(inputs)
        if not self.pad_mode:
            return spatial_shape
        spatial_ndim = len(spatial_shape)
        patch_size, overlap, offset = self._get_valid_shape_parameters(spatial_shape)
        pad_size, _ = self._calculate_pad_size(spatial_shape, spatial_ndim, patch_size, offset, overlap)
        padded_spatial_shape = tuple(ss + ps + pe for ss, ps, pe in zip(spatial_shape, pad_size[1::2], pad_size[::2]))

        return padded_spatial_shape

    def __call__(self, inputs: Any) -> Iterable[tuple[torch.Tensor, Sequence[int]]]:
        """Split the input tensor into patches and return patches and locations.

        Args:
            inputs: either a torch.Tensor with BCHW[D] dimensions, representing an image or a batch of images

        Yields:
            tuple[torch.Tensor, Sequence[int]]: yields tuple of patch and location
        """

        if not isinstance(inputs, torch.Tensor):
            raise ValueError(f"The input should be a tensor. {type(inputs)} is given.")

        spatial_shape = inputs.shape[2:]
        spatial_ndim = len(spatial_shape)
        patch_size, overlap, offset = self._get_valid_shape_parameters(spatial_shape)
        pad_size, is_start_padded = self._calculate_pad_size(spatial_shape, spatial_ndim, patch_size, offset, overlap)

        # Padding
        if self.pad_mode and any(pad_size):
            # pad the inputs; reversed because F.pad expects pads for the last dim first
            inputs = torch.nn.functional.pad(inputs, pad_size[::-1], mode=self.pad_mode, value=self.pad_value)
            # update spatial shape
            spatial_shape = inputs.shape[2:]
            # correct the offset with respect to the padded image
            if is_start_padded:
                offset = tuple(off + p for off, p in zip(offset, pad_size[1::2]))

        # Splitting
        for location in iter_patch_position(spatial_shape, patch_size, offset, overlap, False):
            patch = self._get_patch(inputs, location, patch_size)
            patch = convert_to_tensor(patch, device=self.device)
            # correct the location with respect to original inputs (remove starting pads)
            if is_start_padded:
                location = tuple(loc - p for loc, p in zip(location, pad_size[1::2]))
            # filter patch and yield
            if self.filter_fn is None or self.filter_fn(patch, location):
                yield patch, location
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class WSISlidingWindowSplitter(SlidingWindowSplitter):
    """
    Splits the whole slide image input into patches with sliding window strategy and a possible overlap.
    This extracts patches from file without loading the entire slide into memory.
    It also allows offsetting the starting position and filtering the patches.

    Args:
        patch_size : the size of the patches to be generated.
        offset: the amount of offset for the patches with respect to the original input. Defaults to 0.
        overlap: the amount of overlap between patches in each dimension. It can be either a float in
            the range of [0.0, 1.0) that defines relative overlap to the patch size, or it can be a non-negative int
            that defines number of pixels for overlap. Defaults to 0.0.
        filter_fn: a callable to filter patches. It should accept at least two parameters (patch, location),
            with at most two of them positional, and return True for a patch to keep. Defaults to no filtering.
        pad_mode: define the mode for padding. Either "constant" or None. Default to "constant".
            Padding is only supported with "OpenSlide" or "cuCIM" backend, and the filling value is 256.
        device: the device where the patches are generated. Defaults to the device of inputs.
        reader: the module to be used for loading whole slide imaging. If `reader` is

            - a string, it defines the backend of `monai.data.WSIReader`. Defaults to "OpenSlide".
            - a class (inherited from `BaseWSIReader`), it is initialized and set as wsi_reader.
            - an instance of a class inherited from `BaseWSIReader`, it is set as the wsi_reader.

            To obtain an optimized performance please use either "cuCIM" or "OpenSlide" backend.
        reader_kwargs: the arguments to pass to `WSIReader` or the provided whole slide reader class.
            For instance, level=2, dtype=torch.float32, etc.
            Note that if `level` is not provided, `level=0` is assumed.

    Note:
        When a scalar value is provided for `patch_size`, `offset`, or `overlap`,
        it is broadcasted to all the spatial dimensions.
    """

    def __init__(
        self,
        patch_size: Sequence[int] | int,
        overlap: Sequence[float] | float | Sequence[int] | int = 0.0,
        offset: Sequence[int] | int = 0,
        filter_fn: Callable | None = None,
        pad_mode: str | None = "constant",
        device: torch.device | str | None = None,
        reader: str | BaseWSIReader | type[BaseWSIReader] | None = "OpenSlide",
        **reader_kwargs: dict,
    ) -> None:
        if pad_mode and pad_mode != "constant":
            # fixed message: "only support for constant padding" / "pad_mod=" -> "pad_mode="
            raise ValueError(
                f"The underlying wsi readers only support constant padding. pad_mode={pad_mode} is given."
            )

        super().__init__(
            patch_size=patch_size, overlap=overlap, offset=offset, filter_fn=filter_fn, device=device, pad_mode=pad_mode
        )
        # Set WSI reader
        self._set_reader(reader, reader_kwargs)
        if self.reader.backend.lower() not in ["openslide", "cucim"]:
            # fixed message grammar: "an significant slow down" / "memory foot print"
            warnings.warn(
                f"WSIReader with {self.reader.backend.lower()} backend is not supported for efficiently loading patches. "
                "This may cause a significant slow down and a large memory footprint. "
                "Please use other backends such as 'OpenSlide' or 'cuCIM' instead."
            )

    def _set_reader(self, reader: str | BaseWSIReader | type[BaseWSIReader] | None, reader_kwargs: dict) -> None:
        """
        Set the WSI reader object based on the input reader

        Args:
            reader: the module to be used for loading whole slide imaging. If `reader` is

                - a string, it defines the backend of `monai.data.WSIReader`. Defaults to "OpenSlide".
                - a class (inherited from `BaseWSIReader`), it is initialized and set as wsi_reader.
                - an instance of a class inherited from `BaseWSIReader`, it is set as the wsi_reader.
        """
        self.reader: WSIReader | BaseWSIReader
        self.reader_kwargs = reader_kwargs
        if isinstance(reader, str):
            self.reader = WSIReader(backend=reader, **self.reader_kwargs)
        elif isclass(reader) and issubclass(reader, BaseWSIReader):
            self.reader = reader(**self.reader_kwargs)
        elif isinstance(reader, BaseWSIReader):
            self.reader = reader
        else:
            raise ValueError(f"Unsupported reader type: {reader}.")

    def _get_patch(self, inputs: Any, location: tuple[int, ...], patch_size: tuple[int, ...]) -> Any:
        # read the patch directly from file; add a leading batch dimension
        patch, _ = self.reader.get_data(wsi=inputs, location=location, size=patch_size)  # type: ignore
        return patch[None]

    def get_input_shape(self, inputs: Any) -> tuple:
        """
        Return the input spatial shape.

        Args:
            inputs: either a tensor of shape BCHW[D], representing a batch of images,
                or a filename (str) or list of filenames to the image(s).

        Returns:
            spatial_shape

        """
        wsi = self.reader.read(inputs)
        level = self.reader_kwargs.get("level", 0)
        return self.reader.get_size(wsi, level)

    def __call__(self, inputs: PathLike | Sequence[PathLike]) -> Iterable[tuple[torch.Tensor, Sequence[int]]]:
        """Split the input tensor into patches and return patches and locations.

        Args:
            inputs: the file path to a whole slide image.

        Yields:
            tuple[torch.Tensor, Sequence[int]]: yields tuple of patch and location
        """
        # Handle if the input file paths are batched
        if not isinstance(inputs, str) and isinstance(inputs, Sequence):
            if len(inputs) > 1:
                raise ValueError("Only batch size of one would work for wsi image. Please provide one path at a time.")
            inputs = inputs[0]

        # Check if the input is a string or path like
        if not isinstance(inputs, (str, os.PathLike)):
            raise ValueError(f"The input should be the path to the whole slide image. {type(inputs)} is given.")

        wsi = self.reader.read(inputs)
        level = self.reader_kwargs.get("level", 0)
        downsample_ratio = self.reader.get_downsample_ratio(wsi, level)
        spatial_shape: tuple = self.reader.get_size(wsi, level)
        spatial_ndim = len(spatial_shape)
        if spatial_ndim != 2:
            # fixed message grammar: "only support" -> "only supports"
            raise ValueError(f"WSIReader only supports 2D images. {spatial_ndim} spatial dimension is provided.")
        patch_size, overlap, offset = self._get_valid_shape_parameters(spatial_shape)
        pad_size, is_start_padded = self._calculate_pad_size(spatial_shape, spatial_ndim, patch_size, offset, overlap)

        # Padding (extend the spatial shape)
        if any(pad_size):
            spatial_shape = tuple(ss + ps + pe for ss, ps, pe in zip(spatial_shape, pad_size[1::2], pad_size[::2]))
            # correct the offset with respect to the padded image
            if is_start_padded:
                offset = tuple(off + p for off, p in zip(offset, pad_size[1::2]))

        # Splitting (extracting patches)
        for location in iter_patch_position(spatial_shape, patch_size, offset, overlap, False):
            # patches are always read at the requested level; scale the location back to level 0 coordinates
            location_ = tuple(round(loc * downsample_ratio) for loc in location)
            patch = self._get_patch(wsi, location_, patch_size)
            patch = convert_to_tensor(patch, device=self.device)
            # correct the location with respect to original inputs (remove starting pads)
            if is_start_padded:
                location = tuple(loc - p for loc, p in zip(location, pad_size[1::2]))
            # filter patch and yield
            if self.filter_fn is None or self.filter_fn(patch, location):
                yield patch, location
|
source_code/SegMamba/monai/inferers/utils.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import itertools
|
| 15 |
+
from collections.abc import Callable, Mapping, Sequence
|
| 16 |
+
from typing import Any, Iterable
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import torch
|
| 20 |
+
import torch.nn.functional as F
|
| 21 |
+
|
| 22 |
+
from monai.data.meta_tensor import MetaTensor
|
| 23 |
+
from monai.data.utils import compute_importance_map, dense_patch_slices, get_valid_patch_size
|
| 24 |
+
from monai.utils import (
|
| 25 |
+
BlendMode,
|
| 26 |
+
PytorchPadMode,
|
| 27 |
+
convert_data_type,
|
| 28 |
+
convert_to_dst_type,
|
| 29 |
+
ensure_tuple,
|
| 30 |
+
ensure_tuple_rep,
|
| 31 |
+
fall_back_tuple,
|
| 32 |
+
look_up_option,
|
| 33 |
+
optional_import,
|
| 34 |
+
pytorch_after,
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
tqdm, _ = optional_import("tqdm", name="tqdm")
|
| 38 |
+
_nearest_mode = "nearest-exact" if pytorch_after(1, 11) else "nearest"
|
| 39 |
+
|
| 40 |
+
__all__ = ["sliding_window_inference"]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def sliding_window_inference(
    inputs: torch.Tensor | MetaTensor,
    roi_size: Sequence[int] | int,
    sw_batch_size: int,
    predictor: Callable[..., torch.Tensor | Sequence[torch.Tensor] | dict[Any, torch.Tensor]],
    overlap: Sequence[float] | float = 0.25,
    mode: BlendMode | str = BlendMode.CONSTANT,
    sigma_scale: Sequence[float] | float = 0.125,
    padding_mode: PytorchPadMode | str = PytorchPadMode.CONSTANT,
    cval: float = 0.0,
    sw_device: torch.device | str | None = None,
    device: torch.device | str | None = None,
    progress: bool = False,
    roi_weight_map: torch.Tensor | None = None,
    process_fn: Callable | None = None,
    buffer_steps: int | None = None,
    buffer_dim: int = -1,
    with_coord: bool = False,
    *args: Any,
    **kwargs: Any,
) -> torch.Tensor | tuple[torch.Tensor, ...] | dict[Any, torch.Tensor]:
    """
    Sliding window inference on `inputs` with `predictor`.

    The outputs of `predictor` could be a tensor, a tuple, or a dictionary of tensors.
    Each output in the tuple or dict value is allowed to have different resolutions with respect to the input.
    e.g., the input patch spatial size is [128,128,128], the output (a tuple of two patches) patch sizes
    could be ([128,64,256], [64,32,128]).
    In this case, the parameter `overlap` and `roi_size` need to be carefully chosen to ensure the output ROI is still
    an integer. If the predictor's input and output spatial sizes are not equal, we recommend choosing the parameters
    so that `overlap*roi_size*output_size/input_size` is an integer (for each spatial dimension).

    When roi_size is larger than the inputs' spatial size, the input image are padded during inference.
    To maintain the same spatial sizes, the output image will be cropped to the original input size.

    Args:
        inputs: input image to be processed (assuming NCHW[D])
        roi_size: the spatial window size for inferences.
            When its components have None or non-positives, the corresponding inputs dimension will be used.
            if the components of the `roi_size` are non-positive values, the transform will use the
            corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted
            to `(32, 64)` if the second spatial dimension size of img is `64`.
        sw_batch_size: the batch size to run window slices.
        predictor: given input tensor ``patch_data`` in shape NCHW[D],
            The outputs of the function call ``predictor(patch_data)`` should be a tensor, a tuple, or a dictionary
            with Tensor values. Each output in the tuple or dict value should have the same batch_size, i.e. NM'H'W'[D'];
            where H'W'[D'] represents the output patch's spatial size, M is the number of output channels,
            N is `sw_batch_size`, e.g., the input shape is (7, 1, 128,128,128),
            the output could be a tuple of two tensors, with shapes: ((7, 5, 128, 64, 256), (7, 4, 64, 32, 128)).
            In this case, the parameter `overlap` and `roi_size` need to be carefully chosen
            to ensure the scaled output ROI sizes are still integers.
            If the `predictor`'s input and output spatial sizes are different,
            we recommend choosing the parameters so that ``overlap*roi_size*zoom_scale`` is an integer for each dimension.
        overlap: Amount of overlap between scans along each spatial dimension, defaults to ``0.25``.
        mode: {``"constant"``, ``"gaussian"``}
            How to blend output of overlapping windows. Defaults to ``"constant"``.

            - ``"constant``": gives equal weight to all predictions.
            - ``"gaussian``": gives less weight to predictions on edges of windows.

        sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``"gaussian"``.
            Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.
            When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding
            spatial dimensions.
        padding_mode: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}
            Padding mode for ``inputs``, when ``roi_size`` is larger than inputs. Defaults to ``"constant"``
            See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        cval: fill value for 'constant' padding mode. Default: 0
        sw_device: device for the window data.
            By default the device (and accordingly the memory) of the `inputs` is used.
            Normally `sw_device` should be consistent with the device where `predictor` is defined.
        device: device for the stitched output prediction.
            By default the device (and accordingly the memory) of the `inputs` is used. If for example
            set to device=torch.device('cpu') the gpu memory consumption is less and independent of the
            `inputs` and `roi_size`. Output is on the `device`.
        progress: whether to print a `tqdm` progress bar.
        roi_weight_map: pre-computed (non-negative) weight map for each ROI.
            If not given, and ``mode`` is not `constant`, this map will be computed on the fly.
        process_fn: process inference output and adjust the importance map per window
        buffer_steps: the number of sliding window iterations along the ``buffer_dim``
            to be buffered on ``sw_device`` before writing to ``device``.
            (Typically, ``sw_device`` is ``cuda`` and ``device`` is ``cpu``.)
            default is None, no buffering. For the buffer dim, when spatial size is divisible by buffer_steps*roi_size,
            (i.e. no overlapping among the buffers) non_blocking copy may be automatically enabled for efficiency.
        buffer_dim: the spatial dimension along which the buffers are created.
            0 indicates the first spatial dimension. Default is -1, the last spatial dimension.
        with_coord: whether to pass the window coordinates to ``predictor``. Default is False.
            If True, the signature of ``predictor`` should be ``predictor(patch_data, patch_coord, ...)``.
        args: optional args to be passed to ``predictor``.
        kwargs: optional keyword args to be passed to ``predictor``.

    Note:
        - input must be channel-first and have a batch dim, supports N-D sliding window.

    """
    # buffering is active only when a positive number of buffer steps is given
    buffered = buffer_steps is not None and buffer_steps > 0
    num_spatial_dims = len(inputs.shape) - 2
    if buffered:
        if buffer_dim < -num_spatial_dims or buffer_dim > num_spatial_dims:
            raise ValueError(f"buffer_dim must be in [{-num_spatial_dims}, {num_spatial_dims}], got {buffer_dim}.")
        if buffer_dim < 0:
            # normalize to a non-negative spatial-axis index
            buffer_dim += num_spatial_dims
    overlap = ensure_tuple_rep(overlap, num_spatial_dims)
    for o in overlap:
        if o < 0 or o >= 1:
            raise ValueError(f"overlap must be >= 0 and < 1, got {overlap}.")
    compute_dtype = inputs.dtype

    # determine image spatial size and batch size
    # Note: all input images must have the same image size and batch size
    batch_size, _, *image_size_ = inputs.shape
    device = device or inputs.device
    sw_device = sw_device or inputs.device

    # keep the metadata aside and work on a plain tensor; it is re-attached at the end
    temp_meta = None
    if isinstance(inputs, MetaTensor):
        temp_meta = MetaTensor([]).copy_meta_from(inputs, copy_attr=False)
    inputs = convert_data_type(inputs, torch.Tensor, wrap_sequence=True)[0]
    roi_size = fall_back_tuple(roi_size, image_size_)

    # in case that image size is smaller than roi size
    image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))
    # symmetric pad amounts, in the reversed dimension order expected by F.pad
    pad_size = []
    for k in range(len(inputs.shape) - 1, 1, -1):
        diff = max(roi_size[k - 2] - inputs.shape[k], 0)
        half = diff // 2
        pad_size.extend([half, diff - half])
    if any(pad_size):
        inputs = F.pad(inputs, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode), value=cval)

    # Store all slices
    scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)
    slices = dense_patch_slices(image_size, roi_size, scan_interval, return_slice=not buffered)

    num_win = len(slices)  # number of windows per image
    total_slices = num_win * batch_size  # total number of windows
    windows_range: Iterable
    if not buffered:
        non_blocking = False
        windows_range = range(0, total_slices, sw_batch_size)
    else:
        slices, n_per_batch, b_slices, windows_range = _create_buffered_slices(
            slices, batch_size, sw_batch_size, buffer_dim, buffer_steps
        )
        # non_blocking copies are only safe when buffers do not overlap along buffer_dim
        non_blocking, _ss = torch.cuda.is_available(), -1
        for x in b_slices[:n_per_batch]:
            if x[1] < _ss:  # detect overlapping slices
                non_blocking = False
                break
            _ss = x[2]

    # Create window-level importance map
    valid_patch_size = get_valid_patch_size(image_size, roi_size)
    if valid_patch_size == roi_size and (roi_weight_map is not None):
        importance_map_ = roi_weight_map
    else:
        try:
            valid_p_size = ensure_tuple(valid_patch_size)
            importance_map_ = compute_importance_map(
                valid_p_size, mode=mode, sigma_scale=sigma_scale, device=sw_device, dtype=compute_dtype
            )
            if len(importance_map_.shape) == num_spatial_dims and not process_fn:
                importance_map_ = importance_map_[None, None]  # adds batch, channel dimensions
        except Exception as e:
            raise RuntimeError(
                f"patch size {valid_p_size}, mode={mode}, sigma_scale={sigma_scale}, device={device}\n"
                "Seems to be OOM. Please try smaller patch size or mode='constant' instead of mode='gaussian'."
            ) from e
    importance_map_ = convert_data_type(importance_map_, torch.Tensor, device=sw_device, dtype=compute_dtype)[0]

    # stores output and count map
    output_image_list, count_map_list, sw_device_buffer, b_s, b_i = [], [], [], 0, 0  # type: ignore
    # for each patch
    for slice_g in tqdm(windows_range) if progress else windows_range:
        slice_range = range(slice_g, min(slice_g + sw_batch_size, b_slices[b_s][0] if buffered else total_slices))
        # map each flat window index to [batch slice, channel slice, *spatial slices]
        unravel_slice = [
            [slice(idx // num_win, idx // num_win + 1), slice(None)] + list(slices[idx % num_win])
            for idx in slice_range
        ]
        if sw_batch_size > 1:
            win_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)
        else:
            win_data = inputs[unravel_slice[0]].to(sw_device)
        if with_coord:
            seg_prob_out = predictor(win_data, unravel_slice, *args, **kwargs)  # batched patch
        else:
            seg_prob_out = predictor(win_data, *args, **kwargs)  # batched patch

        # convert seg_prob_out to tuple seg_tuple, this does not allocate new memory.
        dict_keys, seg_tuple = _flatten_struct(seg_prob_out)
        if process_fn:
            seg_tuple, w_t = process_fn(seg_tuple, win_data, importance_map_)
        else:
            w_t = importance_map_
        if len(w_t.shape) == num_spatial_dims:
            w_t = w_t[None, None]
        w_t = w_t.to(dtype=compute_dtype, device=sw_device)
        if buffered:
            c_start, c_end = b_slices[b_s][1:]
            if not sw_device_buffer:
                # lazily allocate the per-buffer accumulator on the first window of the buffer
                k = seg_tuple[0].shape[1]  # len(seg_tuple) > 1 is currently ignored
                sp_size = list(image_size)
                sp_size[buffer_dim] = c_end - c_start
                sw_device_buffer = [torch.zeros(size=[1, k, *sp_size], dtype=compute_dtype, device=sw_device)]
            for p, s in zip(seg_tuple[0], unravel_slice):
                # shift the window into the buffer's local coordinates along buffer_dim
                offset = s[buffer_dim + 2].start - c_start
                s[buffer_dim + 2] = slice(offset, offset + roi_size[buffer_dim])
                s[0] = slice(0, 1)
                sw_device_buffer[0][s] += p * w_t
            b_i += len(unravel_slice)
            if b_i < b_slices[b_s][0]:
                # buffer not full yet: keep accumulating before writing to `device`
                continue
        else:
            sw_device_buffer = list(seg_tuple)

        for ss in range(len(sw_device_buffer)):
            b_shape = sw_device_buffer[ss].shape
            seg_chns, seg_shape = b_shape[1], b_shape[2:]
            z_scale = None
            if not buffered and seg_shape != roi_size:
                # predictor changed spatial resolution: scale output coords/weights accordingly
                z_scale = [out_w_i / float(in_w_i) for out_w_i, in_w_i in zip(seg_shape, roi_size)]
                w_t = F.interpolate(w_t, seg_shape, mode=_nearest_mode)
            if len(output_image_list) <= ss:
                output_shape = [batch_size, seg_chns]
                output_shape += [int(_i * _z) for _i, _z in zip(image_size, z_scale)] if z_scale else list(image_size)
                # allocate memory to store the full output and the count for overlapping parts
                new_tensor: Callable = torch.empty if non_blocking else torch.zeros  # type: ignore
                output_image_list.append(new_tensor(output_shape, dtype=compute_dtype, device=device))
                count_map_list.append(torch.zeros([1, 1] + output_shape[2:], dtype=compute_dtype, device=device))
                w_t_ = w_t.to(device)
                for __s in slices:
                    if z_scale is not None:
                        __s = tuple(slice(int(_si.start * z_s), int(_si.stop * z_s)) for _si, z_s in zip(__s, z_scale))
                    count_map_list[-1][(slice(None), slice(None), *__s)] += w_t_
            if buffered:
                o_slice = [slice(None)] * len(inputs.shape)
                o_slice[buffer_dim + 2] = slice(c_start, c_end)
                img_b = b_s // n_per_batch  # image batch index
                o_slice[0] = slice(img_b, img_b + 1)
                if non_blocking:
                    # non-overlapping buffers: safe to overwrite with an async copy
                    output_image_list[0][o_slice].copy_(sw_device_buffer[0], non_blocking=non_blocking)
                else:
                    output_image_list[0][o_slice] += sw_device_buffer[0].to(device=device)
            else:
                sw_device_buffer[ss] *= w_t
                sw_device_buffer[ss] = sw_device_buffer[ss].to(device)
                _compute_coords(unravel_slice, z_scale, output_image_list[ss], sw_device_buffer[ss])
        sw_device_buffer = []
        if buffered:
            b_s += 1

    if non_blocking:
        # make sure all async copies have landed before post-processing
        torch.cuda.current_stream().synchronize()

    # account for any overlapping sections
    for ss in range(len(output_image_list)):
        output_image_list[ss] /= count_map_list.pop(0)

    # remove padding if image_size smaller than roi_size
    if any(pad_size):
        for ss, output_i in enumerate(output_image_list):
            zoom_scale = [_shape_d / _roi_size_d for _shape_d, _roi_size_d in zip(output_i.shape[2:], roi_size)]
            final_slicing: list[slice] = []
            for sp in range(num_spatial_dims):
                si = num_spatial_dims - sp - 1
                slice_dim = slice(
                    int(round(pad_size[sp * 2] * zoom_scale[si])),
                    int(round((pad_size[sp * 2] + image_size_[si]) * zoom_scale[si])),
                )
                final_slicing.insert(0, slice_dim)
            output_image_list[ss] = output_i[(slice(None), slice(None), *final_slicing)]

    final_output = _pack_struct(output_image_list, dict_keys)
    # restore the original (Meta)Tensor type and move to the requested device
    if temp_meta is not None:
        final_output = convert_to_dst_type(final_output, temp_meta, device=device)[0]
    else:
        final_output = convert_to_dst_type(final_output, inputs, device=device)[0]

    return final_output  # type: ignore
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def _create_buffered_slices(slices, batch_size, sw_batch_size, buffer_dim, buffer_steps):
    """
    Rearrange window slices for buffered sliding-window inference.

    Returns ``(slices, n_per_batch, b_slices, windows_range)`` where ``slices`` are the
    windows sorted along ``buffer_dim``, ``n_per_batch`` is the number of buffers per
    image, ``b_slices`` holds per-buffer ``(flush window index, start, end)`` coordinates
    along ``buffer_dim``, and ``windows_range`` iterates window indices buffer by buffer.
    """
    slices_np = np.asarray(slices)
    # sort windows by their start coordinate along buffer_dim so each buffer covers
    # a contiguous span (stable sort keeps the original order of ties)
    slices_np = slices_np[np.argsort(slices_np[:, buffer_dim, 0], kind="mergesort")]
    slices = [tuple(slice(c[0], c[1]) for c in i) for i in slices_np]
    # keep only the (start, stop) pairs along the buffer dimension
    slices_np = slices_np[:, buffer_dim]

    # count how many windows share each distinct start coordinate along buffer_dim
    _, _, _b_lens = np.unique(slices_np[:, 0], return_counts=True, return_index=True)
    b_ends = np.cumsum(_b_lens).tolist()  # possible buffer flush boundaries
    # take every buffer_steps-th boundary as an actual flush point
    x = [0, *b_ends][:: min(len(b_ends), int(buffer_steps))]
    if x[-1] < b_ends[-1]:
        x.append(b_ends[-1])
    n_per_batch = len(x) - 1
    # one sub-range of window indices per (image, buffer) pair
    windows_range = [
        range(b * x[-1] + x[i], b * x[-1] + x[i + 1], sw_batch_size)
        for b in range(batch_size)
        for i in range(n_per_batch)
    ]
    b_slices = []
    for _s, _r in enumerate(windows_range):
        s_s = slices_np[windows_range[_s - 1].stop % len(slices) if _s > 0 else 0, 0]
        s_e = slices_np[(_r.stop - 1) % len(slices), 1]
        b_slices.append((_r.stop, s_s, s_e))  # buffer index, slice start, slice end
    windows_range = itertools.chain(*windows_range)  # type: ignore
    return slices, n_per_batch, b_slices, windows_range
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def _compute_coords(coords, z_scale, out, patch):
|
| 352 |
+
"""sliding window batch spatial scaling indexing for multi-resolution outputs."""
|
| 353 |
+
for original_idx, p in zip(coords, patch):
|
| 354 |
+
idx_zm = list(original_idx) # 4D for 2D image, 5D for 3D image
|
| 355 |
+
if z_scale:
|
| 356 |
+
for axis in range(2, len(idx_zm)):
|
| 357 |
+
idx_zm[axis] = slice(
|
| 358 |
+
int(original_idx[axis].start * z_scale[axis - 2]), int(original_idx[axis].stop * z_scale[axis - 2])
|
| 359 |
+
)
|
| 360 |
+
out[idx_zm] += p
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def _get_scan_interval(
|
| 364 |
+
image_size: Sequence[int], roi_size: Sequence[int], num_spatial_dims: int, overlap: Sequence[float]
|
| 365 |
+
) -> tuple[int, ...]:
|
| 366 |
+
"""
|
| 367 |
+
Compute scan interval according to the image size, roi size and overlap.
|
| 368 |
+
Scan interval will be `int((1 - overlap) * roi_size)`, if interval is 0,
|
| 369 |
+
use 1 instead to make sure sliding window works.
|
| 370 |
+
|
| 371 |
+
"""
|
| 372 |
+
if len(image_size) != num_spatial_dims:
|
| 373 |
+
raise ValueError(f"len(image_size) {len(image_size)} different from spatial dims {num_spatial_dims}.")
|
| 374 |
+
if len(roi_size) != num_spatial_dims:
|
| 375 |
+
raise ValueError(f"len(roi_size) {len(roi_size)} different from spatial dims {num_spatial_dims}.")
|
| 376 |
+
|
| 377 |
+
scan_interval = []
|
| 378 |
+
for i, o in zip(range(num_spatial_dims), overlap):
|
| 379 |
+
if roi_size[i] == image_size[i]:
|
| 380 |
+
scan_interval.append(int(roi_size[i]))
|
| 381 |
+
else:
|
| 382 |
+
interval = int(roi_size[i] * (1 - o))
|
| 383 |
+
scan_interval.append(interval if interval > 0 else 1)
|
| 384 |
+
return tuple(scan_interval)
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def _flatten_struct(seg_out):
|
| 388 |
+
dict_keys = None
|
| 389 |
+
seg_probs: tuple[torch.Tensor, ...]
|
| 390 |
+
if isinstance(seg_out, torch.Tensor):
|
| 391 |
+
seg_probs = (seg_out,)
|
| 392 |
+
elif isinstance(seg_out, Mapping):
|
| 393 |
+
dict_keys = sorted(seg_out.keys()) # track predictor's output keys
|
| 394 |
+
seg_probs = tuple(seg_out[k] for k in dict_keys)
|
| 395 |
+
else:
|
| 396 |
+
seg_probs = ensure_tuple(seg_out)
|
| 397 |
+
return dict_keys, seg_probs
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def _pack_struct(seg_out, dict_keys=None):
|
| 401 |
+
if dict_keys is not None:
|
| 402 |
+
return dict(zip(dict_keys, seg_out))
|
| 403 |
+
if isinstance(seg_out, (list, tuple)) and len(seg_out) == 1:
|
| 404 |
+
return seg_out[0]
|
| 405 |
+
return ensure_tuple(seg_out)
|
source_code/SegMamba/monai/losses/cldice.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn.functional as F
|
| 16 |
+
from torch.nn.modules.loss import _Loss
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def soft_erode(img: torch.Tensor) -> torch.Tensor:
    """
    Perform soft erosion on the input image.

    The erosion is the element-wise minimum of directional min-pools (negated
    max-pools), one per spatial axis.

    Args:
        img: the shape should be BCH(WD), i.e. a 4-D (2D image) or 5-D (3D image) tensor.

    Returns:
        eroded image with the same shape as ``img``.

    Raises:
        ValueError: when ``img`` is neither 4-D nor 5-D (previously this case
            silently returned ``None``).

    Adapted from:
        https://github.com/jocpae/clDice/blob/master/cldice_loss/pytorch/soft_skeleton.py#L6
    """
    if img.ndim == 4:
        # min-pool along H, then along W; combine with element-wise minimum
        p1 = -F.max_pool2d(-img, (3, 1), (1, 1), (1, 0))
        p2 = -F.max_pool2d(-img, (1, 3), (1, 1), (0, 1))
        return torch.min(p1, p2)
    if img.ndim == 5:
        p1 = -F.max_pool3d(-img, (3, 1, 1), (1, 1, 1), (1, 0, 0))
        p2 = -F.max_pool3d(-img, (1, 3, 1), (1, 1, 1), (0, 1, 0))
        p3 = -F.max_pool3d(-img, (1, 1, 3), (1, 1, 1), (0, 0, 1))
        return torch.min(torch.min(p1, p2), p3)
    raise ValueError(f"soft_erode expects a 4D or 5D tensor in BCH(WD) layout, got {img.ndim}D.")
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def soft_dilate(img: torch.Tensor) -> torch.Tensor:
    """
    Perform soft dilation on the input image via a 3-wide max-pool over each
    spatial axis (stride 1, padding 1, so the shape is preserved).

    Args:
        img: the shape should be BCH(WD), i.e. a 4-D (2D image) or 5-D (3D image) tensor.

    Returns:
        dilated image with the same shape as ``img``.

    Raises:
        ValueError: when ``img`` is neither 4-D nor 5-D (previously this case
            silently returned ``None``).

    Adapted from:
        https://github.com/jocpae/clDice/blob/master/cldice_loss/pytorch/soft_skeleton.py#L18
    """
    if img.ndim == 4:
        return F.max_pool2d(img, (3, 3), (1, 1), (1, 1))
    if img.ndim == 5:
        return F.max_pool3d(img, (3, 3, 3), (1, 1, 1), (1, 1, 1))
    raise ValueError(f"soft_dilate expects a 4D or 5D tensor in BCH(WD) layout, got {img.ndim}D.")
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def soft_open(img: torch.Tensor) -> torch.Tensor:
    """
    Perform soft morphological opening (soft erosion followed by soft dilation)
    on the input image.

    Args:
        img: the shape should be BCH(WD)

    Adapted from:
        https://github.com/jocpae/clDice/blob/master/cldice_loss/pytorch/soft_skeleton.py#L25
    """
    return soft_dilate(soft_erode(img))
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def soft_skel(img: torch.Tensor, iter_: int) -> torch.Tensor:
    """
    Perform soft skeletonization on the input image.

    Repeatedly erodes the image and accumulates the residual between the eroded
    image and its soft opening; the accumulated residuals approximate the skeleton.

    Adapted from:
        https://github.com/jocpae/clDice/blob/master/cldice_loss/pytorch/soft_skeleton.py#L29

    Args:
        img: the shape should be BCH(WD)
        iter_: number of iterations for skeletonization

    Returns:
        skeletonized image
    """
    # initial residual: the part of the image removed by a single opening
    skel = F.relu(img - soft_open(img))
    eroded = img
    for _ in range(iter_):
        eroded = soft_erode(eroded)
        delta = F.relu(eroded - soft_open(eroded))
        # add only the part of delta not already present in the skeleton
        skel = skel + F.relu(delta - skel * delta)
    return skel
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def soft_dice(y_true: torch.Tensor, y_pred: torch.Tensor, smooth: float = 1.0) -> torch.Tensor:
    """
    Function to compute soft dice loss over the foreground channels
    (channel 0, the background, is excluded).

    Adapted from:
        https://github.com/jocpae/clDice/blob/master/cldice_loss/pytorch/cldice.py#L22

    Args:
        y_true: the shape should be BCH(WD)
        y_pred: the shape should be BCH(WD)
        smooth: additive smoothing term that avoids division by zero

    Returns:
        dice loss
    """
    # restrict to the foreground channels
    fg_true = y_true[:, 1:, ...]
    fg_pred = y_pred[:, 1:, ...]
    overlap = torch.sum(fg_true * fg_pred)
    denominator = torch.sum(fg_true) + torch.sum(fg_pred) + smooth
    dice_coeff = (2.0 * overlap + smooth) / denominator
    loss: torch.Tensor = 1.0 - dice_coeff
    return loss
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class SoftclDiceLoss(_Loss):
    """
    Compute the Soft clDice loss defined in:

        Shit et al. (2021) clDice -- A Novel Topology-Preserving Loss Function
        for Tubular Structure Segmentation. (https://arxiv.org/abs/2003.07311)

    Adapted from:
        https://github.com/jocpae/clDice/blob/master/cldice_loss/pytorch/cldice.py#L7
    """

    def __init__(self, iter_: int = 3, smooth: float = 1.0) -> None:
        """
        Args:
            iter_: Number of iterations for skeletonization
            smooth: Smoothing parameter
        """
        super().__init__()
        self.iter = iter_  # soft-skeletonization iterations
        self.smooth = smooth  # additive smoothing against division by zero

    def forward(self, y_true: torch.Tensor, y_pred: torch.Tensor) -> torch.Tensor:
        pred_skeleton = soft_skel(y_pred, self.iter)
        true_skeleton = soft_skel(y_true, self.iter)
        # topology precision: fraction of the predicted skeleton inside the ground truth
        tprec = (torch.sum((pred_skeleton * y_true)[:, 1:, ...]) + self.smooth) / (
            torch.sum(pred_skeleton[:, 1:, ...]) + self.smooth
        )
        # topology sensitivity: fraction of the true skeleton recovered by the prediction
        tsens = (torch.sum((true_skeleton * y_pred)[:, 1:, ...]) + self.smooth) / (
            torch.sum(true_skeleton[:, 1:, ...]) + self.smooth
        )
        cl_dice: torch.Tensor = 1.0 - 2.0 * (tprec * tsens) / (tprec + tsens)
        return cl_dice
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class SoftDiceclDiceLoss(_Loss):
    """
    Weighted combination of soft Dice loss and soft clDice loss.

    Defined in:
        Shit et al. (2021) clDice -- A Novel Topology-Preserving Loss Function
        for Tubular Structure Segmentation. (https://arxiv.org/abs/2003.07311)

    Adapted from:
        https://github.com/jocpae/clDice/blob/master/cldice_loss/pytorch/cldice.py#L38
    """

    def __init__(self, iter_: int = 3, alpha: float = 0.5, smooth: float = 1.0) -> None:
        """
        Args:
            iter_: Number of iterations for skeletonization
            smooth: Smoothing parameter
            alpha: Weighing factor for cldice
        """
        super().__init__()
        self.iter = iter_
        self.smooth = smooth
        self.alpha = alpha

    def forward(self, y_true: torch.Tensor, y_pred: torch.Tensor) -> torch.Tensor:
        # Plain soft Dice term over the foreground channels.
        dice = soft_dice(y_true, y_pred, self.smooth)
        # clDice term, computed exactly as in SoftclDiceLoss.
        pred_skeleton = soft_skel(y_pred, self.iter)
        true_skeleton = soft_skel(y_true, self.iter)
        skel_pred_in_true = torch.sum((pred_skeleton * y_true)[:, 1:, ...])
        tprec = (skel_pred_in_true + self.smooth) / (torch.sum(pred_skeleton[:, 1:, ...]) + self.smooth)
        skel_true_in_pred = torch.sum((true_skeleton * y_pred)[:, 1:, ...])
        tsens = (skel_true_in_pred + self.smooth) / (torch.sum(true_skeleton[:, 1:, ...]) + self.smooth)
        cl_dice = 1.0 - 2.0 * (tprec * tsens) / (tprec + tsens)
        # Convex combination: alpha weights the topology term, (1 - alpha) the Dice term.
        total_loss: torch.Tensor = (1.0 - self.alpha) * dice + self.alpha * cl_dice
        return total_loss
|
source_code/SegMamba/monai/losses/dice.py
ADDED
|
@@ -0,0 +1,1068 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import warnings
|
| 15 |
+
from collections.abc import Callable, Sequence
|
| 16 |
+
from typing import Any
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import torch
|
| 20 |
+
import torch.nn as nn
|
| 21 |
+
import torch.nn.functional as F
|
| 22 |
+
from torch.nn.modules.loss import _Loss
|
| 23 |
+
|
| 24 |
+
from monai.losses.focal_loss import FocalLoss
|
| 25 |
+
from monai.losses.spatial_mask import MaskedLoss
|
| 26 |
+
from monai.networks import one_hot
|
| 27 |
+
from monai.utils import DiceCEReduction, LossReduction, Weight, deprecated_arg, look_up_option, pytorch_after
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class DiceLoss(_Loss):
    """
    Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.
    The data `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).

    Note that axis N of `input` is expected to be logits or probabilities for each class, if passing logits as input,
    must set `sigmoid=True` or `softmax=True`, or specifying `other_act`. And the same axis of `target`
    can be 1 or N (one-hot format).

    The `smooth_nr` and `smooth_dr` parameters are values added to the intersection and union components of
    the inter-over-union calculation to smooth results respectively, these values should be small.

    The original paper: Milletari, F. et. al. (2016) V-Net: Fully Convolutional Neural Networks forVolumetric
    Medical Image Segmentation, 3DV, 2016.

    """

    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Callable | None = None,
        squared_pred: bool = False,
        jaccard: bool = False,
        reduction: LossReduction | str = LossReduction.MEAN,
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
        weight: Sequence[float] | float | int | torch.Tensor | None = None,
    ) -> None:
        """
        Args:
            include_background: if False, channel index 0 (background category) is excluded from the calculation.
                if the non-background segmentations are small compared to the total image size they can get overwhelmed
                by the signal from the background so excluding it in such cases helps convergence.
            to_onehot_y: whether to convert the ``target`` into the one-hot format,
                using the number of classes inferred from `input` (``input.shape[1]``). Defaults to False.
            sigmoid: if True, apply a sigmoid function to the prediction.
            softmax: if True, apply a softmax function to the prediction.
            other_act: callable function to execute other activation layers, Defaults to ``None``. for example:
                ``other_act = torch.tanh``.
            squared_pred: use squared versions of targets and predictions in the denominator or not.
            jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.

            smooth_nr: a small constant added to the numerator to avoid zero.
            smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
                Defaults to False, a Dice loss value is computed independently from each item in the batch
                before any `reduction`.
            weight: weights to apply to the voxels of each class. If None no weights are applied.
                The input can be a single value (same weight for all classes), a sequence of values (the length
                of the sequence should be the same as the number of classes. If not ``include_background``,
                the number of classes should not include the background category class 0).
                The value/values should be no less than 0. Defaults to None.

        Raises:
            TypeError: When ``other_act`` is not an ``Optional[Callable]``.
            ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
                Incompatible values.

        """
        super().__init__(reduction=LossReduction(reduction).value)
        if other_act is not None and not callable(other_act):
            raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
        # sigmoid / softmax / other_act are mutually exclusive activation choices.
        if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
            raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        self.sigmoid = sigmoid
        self.softmax = softmax
        self.other_act = other_act
        self.squared_pred = squared_pred
        self.jaccard = jaccard
        self.smooth_nr = float(smooth_nr)
        self.smooth_dr = float(smooth_dr)
        self.batch = batch
        # Class weights are registered as a buffer so they follow the module's device/dtype moves.
        weight = torch.as_tensor(weight) if weight is not None else None
        self.register_buffer("class_weight", weight)
        # Annotation only: narrows the buffer's type for static checkers (register_buffer is untyped).
        self.class_weight: None | torch.Tensor

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD], where N is the number of classes.
            target: the shape should be BNH[WD] or B1H[WD], where N is the number of classes.

        Raises:
            AssertionError: When input and target (after one hot transform if set)
                have different shapes.
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].

        Example:
            >>> from monai.losses.dice import *  # NOQA
            >>> import torch
            >>> from monai.losses.dice import DiceLoss
            >>> B, C, H, W = 7, 5, 3, 2
            >>> input = torch.rand(B, C, H, W)
            >>> target_idx = torch.randint(low=0, high=C - 1, size=(B, H, W)).long()
            >>> target = one_hot(target_idx[:, None, ...], num_classes=C)
            >>> self = DiceLoss(reduction='none')
            >>> loss = self(input, target)
            >>> assert np.broadcast_shapes(loss.shape, input.shape) == input.shape
        """
        # Optional activation: exactly one of sigmoid / softmax / other_act applies (enforced in __init__).
        if self.sigmoid:
            input = torch.sigmoid(input)

        n_pred_ch = input.shape[1]
        if self.softmax:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `softmax=True` ignored.")
            else:
                input = torch.softmax(input, 1)

        if self.other_act is not None:
            input = self.other_act(input)

        if self.to_onehot_y:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
            else:
                target = one_hot(target, num_classes=n_pred_ch)

        if not self.include_background:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `include_background=False` ignored.")
            else:
                # if skipping background, removing first channel
                target = target[:, 1:]
                input = input[:, 1:]

        if target.shape != input.shape:
            raise AssertionError(f"ground truth has different shape ({target.shape}) from input ({input.shape})")

        # reducing only spatial dimensions (not batch nor channels)
        reduce_axis: list[int] = torch.arange(2, len(input.shape)).tolist()
        if self.batch:
            # reducing spatial dimensions and batch
            reduce_axis = [0] + reduce_axis

        intersection = torch.sum(target * input, dim=reduce_axis)

        # squared_pred uses |x|^2 terms in the denominator (V-Net formulation) instead of plain sums.
        if self.squared_pred:
            ground_o = torch.sum(target**2, dim=reduce_axis)
            pred_o = torch.sum(input**2, dim=reduce_axis)
        else:
            ground_o = torch.sum(target, dim=reduce_axis)
            pred_o = torch.sum(input, dim=reduce_axis)

        denominator = ground_o + pred_o

        # Jaccard (soft IoU): denominator becomes 2 * (|A| + |B| - |A∩B|) so the 2x numerator cancels.
        if self.jaccard:
            denominator = 2.0 * (denominator - intersection)

        f: torch.Tensor = 1.0 - (2.0 * intersection + self.smooth_nr) / (denominator + self.smooth_dr)

        num_of_classes = target.shape[1]
        if self.class_weight is not None and num_of_classes != 1:
            # make sure the lengths of weights are equal to the number of classes
            if self.class_weight.ndim == 0:
                # NOTE(review): a scalar weight is expanded here on first use, replacing the
                # registered buffer attribute with a plain CPU tensor — `.to(f)` below moves it.
                self.class_weight = torch.as_tensor([self.class_weight] * num_of_classes)
            else:
                if self.class_weight.shape[0] != num_of_classes:
                    raise ValueError(
                        """the length of the `weight` sequence should be the same as the number of classes.
                        If `include_background=False`, the weight should not include
                        the background category class 0."""
                    )
            if self.class_weight.min() < 0:
                raise ValueError("the value/values of the `weight` should be no less than 0.")
            # apply class_weight to loss
            f = f * self.class_weight.to(f)

        if self.reduction == LossReduction.MEAN.value:
            f = torch.mean(f)  # the batch and channel average
        elif self.reduction == LossReduction.SUM.value:
            f = torch.sum(f)  # sum over the batch and channel dims
        elif self.reduction == LossReduction.NONE.value:
            # If we are not computing voxelwise loss components at least
            # make sure a none reduction maintains a broadcastable shape
            broadcast_shape = list(f.shape[0:2]) + [1] * (len(input.shape) - 2)
            f = f.view(broadcast_shape)
        else:
            raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')

        return f
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
class MaskedDiceLoss(DiceLoss):
    """
    Dice loss restricted to a spatial region of interest.

    A binary mask ([0, 1]) is applied to both ``input`` and ``target`` before the
    standard :py:class:`DiceLoss` computation: voxels where the mask is `1` keep their
    original value, voxels where it is `0` are zeroed out. Only the masked region
    therefore contributes to the loss value and to gradient calculation.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """
        Args follow :py:class:`monai.losses.DiceLoss`.
        """
        super().__init__(*args, **kwargs)
        # Wrap the parent's forward so masking happens before the Dice computation.
        self.spatial_weighted = MaskedLoss(loss=super().forward)

    def forward(self, input: torch.Tensor, target: torch.Tensor, mask: torch.Tensor | None = None) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD].
            target: the shape should be BNH[WD].
            mask: the shape should B1H[WD] or 11H[WD].
        """
        masked_loss = self.spatial_weighted(input=input, target=target, mask=mask)
        return masked_loss  # type: ignore[no-any-return]
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
class GeneralizedDiceLoss(_Loss):
    """
    Compute the generalised Dice loss defined in:

        Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
        loss function for highly unbalanced segmentations. DLMIA 2017.

    Adapted from:
        https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
    """

    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Callable | None = None,
        w_type: Weight | str = Weight.SQUARE,
        reduction: LossReduction | str = LossReduction.MEAN,
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
    ) -> None:
        """
        Args:
            include_background: If False channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert the ``target`` into the one-hot format,
                using the number of classes inferred from `input` (``input.shape[1]``). Defaults to False.
            sigmoid: If True, apply a sigmoid function to the prediction.
            softmax: If True, apply a softmax function to the prediction.
            other_act: callable function to execute other activation layers, Defaults to ``None``. for example:
                ``other_act = torch.tanh``.
            w_type: {``"square"``, ``"simple"``, ``"uniform"``}
                Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.
            smooth_nr: a small constant added to the numerator to avoid zero.
            smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
                Defaults to False, intersection over union is computed from each item in the batch.
                If True, the class-weighted intersection and union areas are first summed across the batches.

        Raises:
            TypeError: When ``other_act`` is not an ``Optional[Callable]``.
            ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
                Incompatible values.

        """
        super().__init__(reduction=LossReduction(reduction).value)
        if other_act is not None and not callable(other_act):
            raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
        # sigmoid / softmax / other_act are mutually exclusive activation choices.
        if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
            raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")

        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        self.sigmoid = sigmoid
        self.softmax = softmax
        self.other_act = other_act

        # Validate and normalize the weighting scheme against the Weight enum.
        self.w_type = look_up_option(w_type, Weight)

        self.smooth_nr = float(smooth_nr)
        self.smooth_dr = float(smooth_dr)
        self.batch = batch

    def w_func(self, grnd):
        # Map per-class ground-truth volume to a class weight: 1/v ("simple"),
        # 1/v^2 ("square"), or all-ones ("uniform" fallback).
        # NOTE(review): the comparisons use str(Weight.X) — assumes Weight members
        # compare equal to their string form; confirm against the Weight enum definition.
        if self.w_type == str(Weight.SIMPLE):
            return torch.reciprocal(grnd)
        if self.w_type == str(Weight.SQUARE):
            return torch.reciprocal(grnd * grnd)
        return torch.ones_like(grnd)

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD].
            target: the shape should be BNH[WD].

        Raises:
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].

        """
        if self.sigmoid:
            input = torch.sigmoid(input)
        n_pred_ch = input.shape[1]
        if self.softmax:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `softmax=True` ignored.")
            else:
                input = torch.softmax(input, 1)

        if self.other_act is not None:
            input = self.other_act(input)

        if self.to_onehot_y:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
            else:
                target = one_hot(target, num_classes=n_pred_ch)

        if not self.include_background:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `include_background=False` ignored.")
            else:
                # if skipping background, removing first channel
                target = target[:, 1:]
                input = input[:, 1:]

        if target.shape != input.shape:
            raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")

        # reducing only spatial dimensions (not batch nor channels)
        reduce_axis: list[int] = torch.arange(2, len(input.shape)).tolist()
        if self.batch:
            reduce_axis = [0] + reduce_axis
        intersection = torch.sum(target * input, reduce_axis)

        ground_o = torch.sum(target, reduce_axis)
        pred_o = torch.sum(input, reduce_axis)

        denominator = ground_o + pred_o

        # Classes absent from the ground truth have volume 0, so 1/v weights become inf:
        # zero those entries, then substitute the largest finite weight in their place.
        w = self.w_func(ground_o.float())
        infs = torch.isinf(w)
        if self.batch:
            w[infs] = 0.0
            w = w + infs * torch.max(w)
        else:
            w[infs] = 0.0
            # per-item maximum across the class dimension, kept broadcastable
            max_values = torch.max(w, dim=1)[0].unsqueeze(dim=1)
            w = w + infs * max_values

        # Weighted sums collapse over classes (dim 1) or, with batch=True, over the batch (dim 0).
        final_reduce_dim = 0 if self.batch else 1
        numer = 2.0 * (intersection * w).sum(final_reduce_dim, keepdim=True) + self.smooth_nr
        denom = (denominator * w).sum(final_reduce_dim, keepdim=True) + self.smooth_dr
        f: torch.Tensor = 1.0 - (numer / denom)

        if self.reduction == LossReduction.MEAN.value:
            f = torch.mean(f)  # the batch and channel average
        elif self.reduction == LossReduction.SUM.value:
            f = torch.sum(f)  # sum over the batch and channel dims
        elif self.reduction == LossReduction.NONE.value:
            # If we are not computing voxelwise loss components at least
            # make sure a none reduction maintains a broadcastable shape
            broadcast_shape = list(f.shape[0:2]) + [1] * (len(input.shape) - 2)
            f = f.view(broadcast_shape)
        else:
            raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')

        return f
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
class GeneralizedWassersteinDiceLoss(_Loss):
|
| 411 |
+
"""
|
| 412 |
+
Compute the generalized Wasserstein Dice Loss defined in:
|
| 413 |
+
|
| 414 |
+
Fidon L. et al. (2017) Generalised Wasserstein Dice Score for Imbalanced Multi-class
|
| 415 |
+
Segmentation using Holistic Convolutional Networks. BrainLes 2017.
|
| 416 |
+
|
| 417 |
+
Or its variant (use the option weighting_mode="GDL") defined in the Appendix of:
|
| 418 |
+
|
| 419 |
+
Tilborghs, S. et al. (2020) Comparative study of deep learning methods for the automatic
|
| 420 |
+
segmentation of lung, lesion and lesion type in CT scans of COVID-19 patients.
|
| 421 |
+
arXiv preprint arXiv:2007.15546
|
| 422 |
+
|
| 423 |
+
Adapted from:
|
| 424 |
+
https://github.com/LucasFidon/GeneralizedWassersteinDiceLoss
|
| 425 |
+
"""
|
| 426 |
+
|
| 427 |
+
def __init__(
|
| 428 |
+
self,
|
| 429 |
+
dist_matrix: np.ndarray | torch.Tensor,
|
| 430 |
+
weighting_mode: str = "default",
|
| 431 |
+
reduction: LossReduction | str = LossReduction.MEAN,
|
| 432 |
+
smooth_nr: float = 1e-5,
|
| 433 |
+
smooth_dr: float = 1e-5,
|
| 434 |
+
) -> None:
|
| 435 |
+
"""
|
| 436 |
+
Args:
|
| 437 |
+
dist_matrix: 2d tensor or 2d numpy array; matrix of distances between the classes.
|
| 438 |
+
It must have dimension C x C where C is the number of classes.
|
| 439 |
+
weighting_mode: {``"default"``, ``"GDL"``}
|
| 440 |
+
Specifies how to weight the class-specific sum of errors.
|
| 441 |
+
Default to ``"default"``.
|
| 442 |
+
|
| 443 |
+
- ``"default"``: (recommended) use the original weighting method as in:
|
| 444 |
+
Fidon L. et al. (2017) Generalised Wasserstein Dice Score for Imbalanced Multi-class
|
| 445 |
+
Segmentation using Holistic Convolutional Networks. BrainLes 2017.
|
| 446 |
+
- ``"GDL"``: use a GDL-like weighting method as in the Appendix of:
|
| 447 |
+
Tilborghs, S. et al. (2020) Comparative study of deep learning methods for the automatic
|
| 448 |
+
segmentation of lung, lesion and lesion type in CT scans of COVID-19 patients.
|
| 449 |
+
arXiv preprint arXiv:2007.15546
|
| 450 |
+
reduction: {``"none"``, ``"mean"``, ``"sum"``}
|
| 451 |
+
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
|
| 452 |
+
|
| 453 |
+
- ``"none"``: no reduction will be applied.
|
| 454 |
+
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
|
| 455 |
+
- ``"sum"``: the output will be summed.
|
| 456 |
+
smooth_nr: a small constant added to the numerator to avoid zero.
|
| 457 |
+
smooth_dr: a small constant added to the denominator to avoid nan.
|
| 458 |
+
|
| 459 |
+
Raises:
|
| 460 |
+
ValueError: When ``dist_matrix`` is not a square matrix.
|
| 461 |
+
|
| 462 |
+
Example:
|
| 463 |
+
.. code-block:: python
|
| 464 |
+
|
| 465 |
+
import torch
|
| 466 |
+
import numpy as np
|
| 467 |
+
from monai.losses import GeneralizedWassersteinDiceLoss
|
| 468 |
+
|
| 469 |
+
# Example with 3 classes (including the background: label 0).
|
| 470 |
+
# The distance between the background class (label 0) and the other classes is the maximum, equal to 1.
|
| 471 |
+
# The distance between class 1 and class 2 is 0.5.
|
| 472 |
+
dist_mat = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 0.5], [1.0, 0.5, 0.0]], dtype=np.float32)
|
| 473 |
+
wass_loss = GeneralizedWassersteinDiceLoss(dist_matrix=dist_mat)
|
| 474 |
+
|
| 475 |
+
pred_score = torch.tensor([[1000, 0, 0], [0, 1000, 0], [0, 0, 1000]], dtype=torch.float32)
|
| 476 |
+
grnd = torch.tensor([0, 1, 2], dtype=torch.int64)
|
| 477 |
+
wass_loss(pred_score, grnd) # 0
|
| 478 |
+
|
| 479 |
+
"""
|
| 480 |
+
super().__init__(reduction=LossReduction(reduction).value)
|
| 481 |
+
|
| 482 |
+
if dist_matrix.shape[0] != dist_matrix.shape[1]:
|
| 483 |
+
raise ValueError(f"dist_matrix must be C x C, got {dist_matrix.shape[0]} x {dist_matrix.shape[1]}.")
|
| 484 |
+
|
| 485 |
+
if weighting_mode not in ["default", "GDL"]:
|
| 486 |
+
raise ValueError("weighting_mode must be either 'default' or 'GDL, got %s." % weighting_mode)
|
| 487 |
+
|
| 488 |
+
self.m = dist_matrix
|
| 489 |
+
if isinstance(self.m, np.ndarray):
|
| 490 |
+
self.m = torch.from_numpy(self.m)
|
| 491 |
+
if torch.max(self.m) != 1:
|
| 492 |
+
self.m = self.m / torch.max(self.m)
|
| 493 |
+
self.alpha_mode = weighting_mode
|
| 494 |
+
self.num_classes = self.m.size(0)
|
| 495 |
+
self.smooth_nr = float(smooth_nr)
|
| 496 |
+
self.smooth_dr = float(smooth_dr)
|
| 497 |
+
|
| 498 |
+
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """
    Compute the Generalized Wasserstein Dice loss for a batch of predictions.

    Args:
        input: the shape should be BNH[WD]; raw class scores — a softmax over the
            channel dimension is applied internally.
        target: the shape should be BNH[WD].
            NOTE(review): the target is flattened to (B, N) and used as integer class
            indices below, so a single-channel label map (B1H[WD]) appears to be the
            expected input — confirm against callers.

    Returns:
        the loss value, reduced according to ``self.reduction``.
    """
    # Aggregate spatial dimensions: input -> (B, C, N), target -> (B, N) integer labels.
    flat_input = input.reshape(input.size(0), input.size(1), -1)
    flat_target = target.reshape(target.size(0), -1).long()

    # Apply the softmax to the input scores map
    probs = F.softmax(flat_input, dim=1)

    # Compute the voxel-wise Wasserstein distance map (eq. 6 in Fidon et al. 2017).
    wass_dist_map = self.wasserstein_distance_map(probs, flat_target)

    # Compute the per-class weights alpha (depends on self.alpha_mode).
    alpha = self._compute_alpha_generalized_true_positives(flat_target)

    # Compute the numerator and denominator of the generalized Wasserstein Dice loss
    if self.alpha_mode == "GDL":
        # use GDL-style alpha weights (i.e. normalize by the volume of each class)
        # contrary to the original definition we also use alpha in the "generalized all error".
        true_pos = self._compute_generalized_true_positive(alpha, flat_target, wass_dist_map)
        denom = self._compute_denominator(alpha, flat_target, wass_dist_map)
    else:  # default: as in the original paper
        # (i.e. alpha=1 for all foreground classes and 0 for the background).
        # Compute the generalised number of true positives
        true_pos = self._compute_generalized_true_positive(alpha, flat_target, wass_dist_map)
        all_error = torch.sum(wass_dist_map, dim=1)
        denom = 2 * true_pos + all_error

    # Compute the final loss: 1 - smoothed generalized Wasserstein Dice score.
    wass_dice: torch.Tensor = (2.0 * true_pos + self.smooth_nr) / (denom + self.smooth_dr)
    wass_dice_loss: torch.Tensor = 1.0 - wass_dice

    if self.reduction == LossReduction.MEAN.value:
        wass_dice_loss = torch.mean(wass_dice_loss)  # the batch and channel average
    elif self.reduction == LossReduction.SUM.value:
        wass_dice_loss = torch.sum(wass_dice_loss)  # sum over the batch and channel dims
    elif self.reduction == LossReduction.NONE.value:
        # If we are not computing voxelwise loss components at least
        # make sure a none reduction maintains a broadcastable shape
        # NOTE(review): wass_dice_loss is per-batch-item (shape (B,)), while
        # broadcast_shape is (B, C, 1, ...); this view can only succeed when
        # input.shape[1] == 1 — confirm intended behavior for multi-class inputs.
        broadcast_shape = input.shape[0:2] + (1,) * (len(input.shape) - 2)
        wass_dice_loss = wass_dice_loss.view(broadcast_shape)
    else:
        raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')

    return wass_dice_loss
|
| 548 |
+
|
| 549 |
+
def wasserstein_distance_map(self, flat_proba: torch.Tensor, flat_target: torch.Tensor) -> torch.Tensor:
|
| 550 |
+
"""
|
| 551 |
+
Compute the voxel-wise Wasserstein distance between the
|
| 552 |
+
flattened prediction and the flattened labels (ground_truth) with respect
|
| 553 |
+
to the distance matrix on the label space M.
|
| 554 |
+
This corresponds to eq. 6 in:
|
| 555 |
+
|
| 556 |
+
Fidon L. et al. (2017) Generalised Wasserstein Dice Score for Imbalanced Multi-class
|
| 557 |
+
Segmentation using Holistic Convolutional Networks. BrainLes 2017.
|
| 558 |
+
|
| 559 |
+
Args:
|
| 560 |
+
flat_proba: the probabilities of input(predicted) tensor.
|
| 561 |
+
flat_target: the target tensor.
|
| 562 |
+
"""
|
| 563 |
+
# Turn the distance matrix to a map of identical matrix
|
| 564 |
+
m = torch.clone(torch.as_tensor(self.m)).to(flat_proba.device)
|
| 565 |
+
m_extended = torch.unsqueeze(m, dim=0)
|
| 566 |
+
m_extended = torch.unsqueeze(m_extended, dim=3)
|
| 567 |
+
m_extended = m_extended.expand((flat_proba.size(0), m_extended.size(1), m_extended.size(2), flat_proba.size(2)))
|
| 568 |
+
|
| 569 |
+
# Expand the feature dimensions of the target
|
| 570 |
+
flat_target_extended = torch.unsqueeze(flat_target, dim=1)
|
| 571 |
+
flat_target_extended = flat_target_extended.expand(
|
| 572 |
+
(flat_target.size(0), m_extended.size(1), flat_target.size(1))
|
| 573 |
+
)
|
| 574 |
+
flat_target_extended = torch.unsqueeze(flat_target_extended, dim=1)
|
| 575 |
+
|
| 576 |
+
# Extract the vector of class distances for the ground-truth label at each voxel
|
| 577 |
+
m_extended = torch.gather(m_extended, dim=1, index=flat_target_extended)
|
| 578 |
+
m_extended = torch.squeeze(m_extended, dim=1)
|
| 579 |
+
|
| 580 |
+
# Compute the wasserstein distance map
|
| 581 |
+
wasserstein_map = m_extended * flat_proba
|
| 582 |
+
|
| 583 |
+
# Sum over the classes
|
| 584 |
+
wasserstein_map = torch.sum(wasserstein_map, dim=1)
|
| 585 |
+
return wasserstein_map
|
| 586 |
+
|
| 587 |
+
def _compute_generalized_true_positive(
    self, alpha: torch.Tensor, flat_target: torch.Tensor, wasserstein_distance_map: torch.Tensor
) -> torch.Tensor:
    """
    Compute the generalised number of true positives.

    Args:
        alpha: per-class weights, shape (B, C) — see ``_compute_alpha_generalized_true_positives``.
        flat_target: the target tensor of class indices, shape (B, N).
        wasserstein_distance_map: the map obtained from ``wasserstein_distance_map``, shape (B, N).
    """
    # Extend alpha to a map and select value at each voxel according to flat_target
    alpha_extended = torch.unsqueeze(alpha, dim=2)
    alpha_extended = alpha_extended.expand((flat_target.size(0), self.num_classes, flat_target.size(1)))
    flat_target_extended = torch.unsqueeze(flat_target, dim=1)
    alpha_extended = torch.gather(alpha_extended, index=flat_target_extended, dim=1)

    # alpha_extended has shape (B, 1, N) while (1.0 - wasserstein_distance_map) is (B, N).
    # NOTE(review): for batch size > 1 this product broadcasts to (B, B, N), mixing terms
    # across different batch items before the sum over dims [1, 2] — confirm this is the
    # intended behavior (it is only a plain per-item weighted sum when B == 1).
    return torch.sum(alpha_extended * (1.0 - wasserstein_distance_map), dim=[1, 2])
|
| 603 |
+
|
| 604 |
+
def _compute_denominator(
    self, alpha: torch.Tensor, flat_target: torch.Tensor, wasserstein_distance_map: torch.Tensor
) -> torch.Tensor:
    """
    Compute the denominator used by the GDL-style weighting mode; identical to
    ``_compute_generalized_true_positive`` except that the distance map is subtracted
    from 2.0 instead of 1.0.

    Args:
        alpha: per-class weights, shape (B, C).
        flat_target: the target tensor of class indices, shape (B, N).
        wasserstein_distance_map: the map obtained from ``wasserstein_distance_map``, shape (B, N).
    """
    # Extend alpha to a map and select value at each voxel according to flat_target
    alpha_extended = torch.unsqueeze(alpha, dim=2)
    alpha_extended = alpha_extended.expand((flat_target.size(0), self.num_classes, flat_target.size(1)))
    flat_target_extended = torch.unsqueeze(flat_target, dim=1)
    alpha_extended = torch.gather(alpha_extended, index=flat_target_extended, dim=1)

    # alpha_extended has shape (B, 1, N) while (2.0 - wasserstein_distance_map) is (B, N).
    # NOTE(review): for batch size > 1 this product broadcasts to (B, B, N), mixing terms
    # across different batch items before the sum over dims [1, 2] — confirm intended.
    return torch.sum(alpha_extended * (2.0 - wasserstein_distance_map), dim=[1, 2])
|
| 620 |
+
|
| 621 |
+
def _compute_alpha_generalized_true_positives(self, flat_target: torch.Tensor) -> torch.Tensor:
|
| 622 |
+
"""
|
| 623 |
+
Args:
|
| 624 |
+
flat_target: the target tensor.
|
| 625 |
+
"""
|
| 626 |
+
alpha: torch.Tensor = torch.ones((flat_target.size(0), self.num_classes)).float().to(flat_target.device)
|
| 627 |
+
if self.alpha_mode == "GDL": # GDL style
|
| 628 |
+
# Define alpha like in the generalized dice loss
|
| 629 |
+
# i.e. the inverse of the volume of each class.
|
| 630 |
+
one_hot_f = F.one_hot(flat_target, num_classes=self.num_classes).permute(0, 2, 1).float()
|
| 631 |
+
volumes = torch.sum(one_hot_f, dim=2)
|
| 632 |
+
alpha = 1.0 / (volumes + 1.0)
|
| 633 |
+
else: # default, i.e. like in the original paper
|
| 634 |
+
# alpha weights are 0 for the background and 1 the other classes
|
| 635 |
+
alpha[:, 0] = 0.0
|
| 636 |
+
return alpha
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
class DiceCELoss(_Loss):
    """
    Compute both Dice loss and Cross Entropy Loss, and return the weighted sum of these two losses.
    The details of Dice loss is shown in ``monai.losses.DiceLoss``.
    The details of Cross Entropy Loss is shown in ``torch.nn.CrossEntropyLoss`` and ``torch.nn.BCEWithLogitsLoss()``.
    In this implementation, two deprecated parameters ``size_average`` and ``reduce``, and the parameter ``ignore_index`` are
    not supported.

    """

    @deprecated_arg(
        "ce_weight", since="1.2", removed="1.4", new_name="weight", msg_suffix="please use `weight` instead."
    )
    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Callable | None = None,
        squared_pred: bool = False,
        jaccard: bool = False,
        reduction: str = "mean",
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
        ce_weight: torch.Tensor | None = None,
        weight: torch.Tensor | None = None,
        lambda_dice: float = 1.0,
        lambda_ce: float = 1.0,
    ) -> None:
        """
        Args:
            ``ce_weight`` and ``lambda_ce`` are only used for cross entropy loss.
            ``reduction`` and ``weight`` is used for both losses and other parameters are only used for dice loss.

            include_background: if False channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert the ``target`` into the one-hot format,
                using the number of classes inferred from `input` (``input.shape[1]``). Defaults to False.
            sigmoid: if True, apply a sigmoid function to the prediction, only used by the `DiceLoss`,
                don't need to specify activation function for `CrossEntropyLoss` and `BCEWithLogitsLoss`.
            softmax: if True, apply a softmax function to the prediction, only used by the `DiceLoss`,
                don't need to specify activation function for `CrossEntropyLoss` and `BCEWithLogitsLoss`.
            other_act: callable function to execute other activation layers, Defaults to ``None``. for example:
                ``other_act = torch.tanh``. only used by the `DiceLoss`, not for the `CrossEntropyLoss` and `BCEWithLogitsLoss`.
            squared_pred: use squared versions of targets and predictions in the denominator or not.
            jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
            reduction: {``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``. The dice loss should
                as least reduce the spatial dimensions, which is different from cross entropy loss, thus here
                the ``none`` option cannot be used.

                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.

            smooth_nr: a small constant added to the numerator to avoid zero.
            smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
                Defaults to False, a Dice loss value is computed independently from each item in the batch
                before any `reduction`.
            weight: a rescaling weight given to each class for cross entropy loss for `CrossEntropyLoss`.
                or a weight of positive examples to be broadcasted with target used as `pos_weight` for `BCEWithLogitsLoss`.
                See ``torch.nn.CrossEntropyLoss()`` or ``torch.nn.BCEWithLogitsLoss()`` for more information.
                The weight is also used in `DiceLoss`.
            lambda_dice: the trade-off weight value for dice loss. The value should be no less than 0.0.
                Defaults to 1.0.
            lambda_ce: the trade-off weight value for cross entropy loss. The value should be no less than 0.0.
                Defaults to 1.0.

        """
        super().__init__()
        # validate the reduction against DiceCEReduction ("mean"/"sum"; "none" is excluded).
        reduction = look_up_option(reduction, DiceCEReduction).value
        # the deprecated `ce_weight` argument takes precedence when supplied.
        weight = ce_weight if ce_weight is not None else weight
        dice_weight: torch.Tensor | None
        if weight is not None and not include_background:
            # DiceLoss drops channel 0 when include_background=False,
            # so drop the background entry of the weight accordingly.
            dice_weight = weight[1:]
        else:
            dice_weight = weight
        self.dice = DiceLoss(
            include_background=include_background,
            to_onehot_y=to_onehot_y,
            sigmoid=sigmoid,
            softmax=softmax,
            other_act=other_act,
            squared_pred=squared_pred,
            jaccard=jaccard,
            reduction=reduction,
            smooth_nr=smooth_nr,
            smooth_dr=smooth_dr,
            batch=batch,
            weight=dice_weight,
        )
        # multi-class path uses CrossEntropyLoss; single-channel path uses BCEWithLogitsLoss
        # (the choice is made per-call in `forward`).
        self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction)
        self.binary_cross_entropy = nn.BCEWithLogitsLoss(pos_weight=weight, reduction=reduction)
        if lambda_dice < 0.0:
            raise ValueError("lambda_dice should be no less than 0.0.")
        if lambda_ce < 0.0:
            raise ValueError("lambda_ce should be no less than 0.0.")
        self.lambda_dice = lambda_dice
        self.lambda_ce = lambda_ce
        # PyTorch < 1.10 does not accept multichannel (probability) targets in CrossEntropyLoss.
        self.old_pt_ver = not pytorch_after(1, 10)

    def ce(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Compute CrossEntropy loss for the input logits and target.
        Will remove the channel dim according to PyTorch CrossEntropyLoss:
        https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html?#torch.nn.CrossEntropyLoss.

        """
        n_pred_ch, n_target_ch = input.shape[1], target.shape[1]
        if n_pred_ch != n_target_ch and n_target_ch == 1:
            # class-index target: drop the singleton channel and use integer labels.
            target = torch.squeeze(target, dim=1)
            target = target.long()
        elif self.old_pt_ver:
            warnings.warn(
                f"Multichannel targets are not supported in this older Pytorch version {torch.__version__}. "
                "Using argmax (as a workaround) to convert target to a single channel."
            )
            target = torch.argmax(target, dim=1)
        elif not torch.is_floating_point(target):
            # PyTorch >= 1.10 accepts class-probability targets, but they must be floats.
            target = target.to(dtype=input.dtype)

        return self.cross_entropy(input, target)  # type: ignore[no-any-return]

    def bce(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Compute Binary CrossEntropy loss for the input logits and target in one single class.

        """
        # BCEWithLogitsLoss requires a floating-point target.
        if not torch.is_floating_point(target):
            target = target.to(dtype=input.dtype)

        return self.binary_cross_entropy(input, target)  # type: ignore[no-any-return]

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD].
            target: the shape should be BNH[WD] or B1H[WD].

        Raises:
            ValueError: When number of dimensions for input and target are different.
            ValueError: When number of channels for target is neither 1 (without one-hot encoding) nor the same as input.

        Returns:
            torch.Tensor: value of the loss.

        """
        if input.dim() != target.dim():
            raise ValueError(
                "the number of dimensions for input and target should be the same, "
                f"got shape {input.shape} (nb dims: {len(input.shape)}) and {target.shape} (nb dims: {len(target.shape)}). "
                "if target is not one-hot encoded, please provide a tensor with shape B1H[WD]."
            )

        if target.shape[1] != 1 and target.shape[1] != input.shape[1]:
            raise ValueError(
                "number of channels for target is neither 1 (without one-hot encoding) nor the same as input, "
                f"got shape {input.shape} and {target.shape}."
            )

        dice_loss = self.dice(input, target)
        # single-channel (binary) predictions go through BCE, multi-class through CE.
        ce_loss = self.ce(input, target) if input.shape[1] != 1 else self.bce(input, target)
        total_loss: torch.Tensor = self.lambda_dice * dice_loss + self.lambda_ce * ce_loss

        return total_loss
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
class DiceFocalLoss(_Loss):
    """
    Compute both Dice loss and Focal Loss, and return the weighted sum of these two losses.
    The details of Dice loss is shown in ``monai.losses.DiceLoss``.
    The details of Focal Loss is shown in ``monai.losses.FocalLoss``.

    ``gamma`` and ``lambda_focal`` are only used for the focal loss.
    ``include_background``, ``weight`` and ``reduction`` are used for both losses
    and other parameters are only used for dice loss.

    """

    @deprecated_arg(
        "focal_weight", since="1.2", removed="1.4", new_name="weight", msg_suffix="please use `weight` instead."
    )
    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Callable | None = None,
        squared_pred: bool = False,
        jaccard: bool = False,
        reduction: str = "mean",
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
        gamma: float = 2.0,
        focal_weight: Sequence[float] | float | int | torch.Tensor | None = None,
        weight: Sequence[float] | float | int | torch.Tensor | None = None,
        lambda_dice: float = 1.0,
        lambda_focal: float = 1.0,
    ) -> None:
        """
        Args:
            include_background: if False channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert the ``target`` into the one-hot format,
                using the number of classes inferred from `input` (``input.shape[1]``). Defaults to False.
            sigmoid: if True, apply a sigmoid function to the prediction, only used by the `DiceLoss`,
                don't need to specify activation function for `FocalLoss`.
            softmax: if True, apply a softmax function to the prediction, only used by the `DiceLoss`,
                don't need to specify activation function for `FocalLoss`.
            other_act: callable function to execute other activation layers, Defaults to ``None``.
                for example: `other_act = torch.tanh`. only used by the `DiceLoss`, not for `FocalLoss`.
            squared_pred: use squared versions of targets and predictions in the denominator or not.
            jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.

            smooth_nr: a small constant added to the numerator to avoid zero.
            smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
                Defaults to False, a Dice loss value is computed independently from each item in the batch
                before any `reduction`.
            gamma: value of the exponent gamma in the definition of the Focal loss.
            weight: weights to apply to the voxels of each class. If None no weights are applied.
                The input can be a single value (same weight for all classes), a sequence of values (the length
                of the sequence should be the same as the number of classes).
            lambda_dice: the trade-off weight value for dice loss. The value should be no less than 0.0.
                Defaults to 1.0.
            lambda_focal: the trade-off weight value for focal loss. The value should be no less than 0.0.
                Defaults to 1.0.

        """
        super().__init__()
        # the deprecated `focal_weight` argument takes precedence when supplied.
        weight = focal_weight if focal_weight is not None else weight
        # the one-hot conversion is performed once in `forward`, so both sub-losses
        # are constructed with to_onehot_y=False.
        self.dice = DiceLoss(
            include_background=include_background,
            to_onehot_y=False,
            sigmoid=sigmoid,
            softmax=softmax,
            other_act=other_act,
            squared_pred=squared_pred,
            jaccard=jaccard,
            reduction=reduction,
            smooth_nr=smooth_nr,
            smooth_dr=smooth_dr,
            batch=batch,
            weight=weight,
        )
        self.focal = FocalLoss(
            include_background=include_background, to_onehot_y=False, gamma=gamma, weight=weight, reduction=reduction
        )
        if lambda_dice < 0.0:
            raise ValueError("lambda_dice should be no less than 0.0.")
        if lambda_focal < 0.0:
            raise ValueError("lambda_focal should be no less than 0.0.")
        self.lambda_dice = lambda_dice
        self.lambda_focal = lambda_focal
        self.to_onehot_y = to_onehot_y

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD]. The input should be the original logits
                due to the restriction of ``monai.losses.FocalLoss``.
            target: the shape should be BNH[WD] or B1H[WD].

        Raises:
            ValueError: When number of dimensions for input and target are different.
            ValueError: When number of channels for target is neither 1 (without one-hot encoding) nor the same as input.

        Returns:
            torch.Tensor: value of the loss.
        """
        if input.dim() != target.dim():
            raise ValueError(
                "the number of dimensions for input and target should be the same, "
                f"got shape {input.shape} (nb dims: {len(input.shape)}) and {target.shape} (nb dims: {len(target.shape)}). "
                "if target is not one-hot encoded, please provide a tensor with shape B1H[WD]."
            )

        if target.shape[1] != 1 and target.shape[1] != input.shape[1]:
            raise ValueError(
                "number of channels for target is neither 1 (without one-hot encoding) nor the same as input, "
                f"got shape {input.shape} and {target.shape}."
            )

        if self.to_onehot_y:
            # convert the target once here so both sub-losses receive the same one-hot target.
            n_pred_ch = input.shape[1]
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
            else:
                target = one_hot(target, num_classes=n_pred_ch)
        dice_loss = self.dice(input, target)
        focal_loss = self.focal(input, target)
        total_loss: torch.Tensor = self.lambda_dice * dice_loss + self.lambda_focal * focal_loss
        return total_loss
|
| 940 |
+
|
| 941 |
+
|
| 942 |
+
class GeneralizedDiceFocalLoss(_Loss):
    """Compute both Generalized Dice Loss and Focal Loss, and return their weighted average. The details of Generalized Dice Loss
    and Focal Loss are available at ``monai.losses.GeneralizedDiceLoss`` and ``monai.losses.FocalLoss``.

    Args:
        include_background (bool, optional): if False channel index 0 (background category) is excluded from the calculation.
            Defaults to True.
        to_onehot_y: whether to convert the ``target`` into the one-hot format,
            using the number of classes inferred from `input` (``input.shape[1]``). Defaults to False.
        sigmoid (bool, optional): if True, apply a sigmoid function to the prediction. Defaults to False.
        softmax (bool, optional): if True, apply a softmax function to the prediction. Defaults to False.
        other_act (Optional[Callable], optional): callable function to execute other activation layers,
            Defaults to ``None``. for example: `other_act = torch.tanh`.
            only used by the `GeneralizedDiceLoss`, not for the `FocalLoss`.
        w_type (Union[Weight, str], optional): {``"square"``, ``"simple"``, ``"uniform"``}. Type of function to transform
            ground-truth volume to a weight factor. Defaults to ``"square"``.
        reduction (Union[LossReduction, str], optional): {``"none"``, ``"mean"``, ``"sum"``}. Specified the reduction to
            apply to the output. Defaults to ``"mean"``.
            - ``"none"``: no reduction will be applied.
            - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
            - ``"sum"``: the output will be summed.
        smooth_nr (float, optional): a small constant added to the numerator to avoid zero. Defaults to 1e-5.
        smooth_dr (float, optional): a small constant added to the denominator to avoid nan. Defaults to 1e-5.
        batch (bool, optional): whether to sum the intersection and union areas over the batch dimension before the dividing.
            Defaults to False, i.e., the areas are computed for each item in the batch.
        gamma (float, optional): value of the exponent gamma in the definition of the Focal loss. Defaults to 2.0.
        weight (Optional[Union[Sequence[float], float, int, torch.Tensor]], optional): weights to apply to
            the voxels of each class. If None no weights are applied. The input can be a single value
            (same weight for all classes), a sequence of values (the length of the sequence should be the same as
            the number of classes). Defaults to None.
        lambda_gdl (float, optional): the trade-off weight value for Generalized Dice Loss. The value should be
            no less than 0.0. Defaults to 1.0.
        lambda_focal (float, optional): the trade-off weight value for Focal Loss. The value should be no less
            than 0.0. Defaults to 1.0.

    Raises:
        ValueError: if either `lambda_gdl` or `lambda_focal` is less than 0.
    """

    @deprecated_arg(
        "focal_weight", since="1.2", removed="1.4", new_name="weight", msg_suffix="please use `weight` instead."
    )
    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Callable | None = None,
        w_type: Weight | str = Weight.SQUARE,
        reduction: LossReduction | str = LossReduction.MEAN,
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
        gamma: float = 2.0,
        focal_weight: Sequence[float] | float | int | torch.Tensor | None = None,
        weight: Sequence[float] | float | int | torch.Tensor | None = None,
        lambda_gdl: float = 1.0,
        lambda_focal: float = 1.0,
    ) -> None:
        super().__init__()
        # unlike DiceFocalLoss, both sub-losses handle the one-hot conversion themselves.
        self.generalized_dice = GeneralizedDiceLoss(
            include_background=include_background,
            to_onehot_y=to_onehot_y,
            sigmoid=sigmoid,
            softmax=softmax,
            other_act=other_act,
            w_type=w_type,
            reduction=reduction,
            smooth_nr=smooth_nr,
            smooth_dr=smooth_dr,
            batch=batch,
        )
        # the deprecated `focal_weight` argument takes precedence when supplied.
        weight = focal_weight if focal_weight is not None else weight
        self.focal = FocalLoss(
            include_background=include_background,
            to_onehot_y=to_onehot_y,
            gamma=gamma,
            weight=weight,
            reduction=reduction,
        )
        if lambda_gdl < 0.0:
            raise ValueError("lambda_gdl should be no less than 0.0.")
        if lambda_focal < 0.0:
            raise ValueError("lambda_focal should be no less than 0.0.")
        self.lambda_gdl = lambda_gdl
        self.lambda_focal = lambda_focal

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input (torch.Tensor): the shape should be BNH[WD]. The input should be the original logits
                due to the restriction of ``monai.losses.FocalLoss``.
            target (torch.Tensor): the shape should be BNH[WD] or B1H[WD].

        Raises:
            ValueError: When number of dimensions for input and target are different.
            ValueError: When number of channels for target is neither 1 (without one-hot encoding) nor the same as input.

        Returns:
            torch.Tensor: value of the loss.
        """
        if input.dim() != target.dim():
            raise ValueError(
                "the number of dimensions for input and target should be the same, "
                f"got shape {input.shape} (nb dims: {len(input.shape)}) and {target.shape} (nb dims: {len(target.shape)}). "
                "if target is not one-hot encoded, please provide a tensor with shape B1H[WD]."
            )

        if target.shape[1] != 1 and target.shape[1] != input.shape[1]:
            raise ValueError(
                "number of channels for target is neither 1 (without one-hot encoding) nor the same as input, "
                f"got shape {input.shape} and {target.shape}."
            )

        gdl_loss = self.generalized_dice(input, target)
        focal_loss = self.focal(input, target)
        total_loss: torch.Tensor = self.lambda_gdl * gdl_loss + self.lambda_focal * focal_loss
        return total_loss
|
| 1061 |
+
|
| 1062 |
+
|
| 1063 |
+
# Convenience aliases for the loss classes defined in this module.
Dice = DiceLoss
dice_ce = DiceCELoss
dice_focal = DiceFocalLoss
generalized_dice = GeneralizedDiceLoss
generalized_dice_focal = GeneralizedDiceFocalLoss
generalized_wasserstein_dice = GeneralizedWassersteinDiceLoss
|
source_code/SegMamba/monai/losses/ds_loss.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from typing import Union
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import torch.nn.functional as F
|
| 18 |
+
from torch.nn.modules.loss import _Loss
|
| 19 |
+
|
| 20 |
+
from monai.utils import pytorch_after
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class DeepSupervisionLoss(_Loss):
    """
    Wraps a base loss so it can consume the list of tensors produced by a deeply
    supervised network. Each supervision level's loss is scaled by a per-level
    weight and the weighted losses are summed into the final value.
    """

    def __init__(self, loss: _Loss, weight_mode: str = "exp", weights: list[float] | None = None) -> None:
        """
        Args:
            loss: the underlying loss instance applied at every level, e.g DiceLoss().
            weight_mode: {``"same"``, ``"exp"``, ``"two"``}
                How per-level weights are derived when ``weights`` is not supplied.
                Defaults to ``"exp"``.
                - ``"same"``: every level weighs 1.
                - ``"exp"``: weights halve at each deeper level (floored at 0.0625): 1, 0.5, 0.25, ...
                - ``"two"``: the full-resolution level weighs 1, every deeper level weighs 0.5.
            weights: explicit per-level weights; when long enough to cover all levels,
                these take precedence over ``weight_mode``.
        """
        super().__init__()
        self.loss = loss
        self.weight_mode = weight_mode
        self.weights = weights
        # "nearest-exact" is preferred but only exists from torch 1.11 onwards
        self.interp_mode = "nearest-exact" if pytorch_after(1, 11) else "nearest"

    def get_weights(self, levels: int = 1) -> list[float]:
        """Return the list of per-level weights for the given number of scale levels."""
        levels = max(1, levels)
        # explicit weights win, but only when they cover every requested level
        if self.weights is not None and len(self.weights) >= levels:
            return self.weights[:levels]
        if self.weight_mode == "exp":
            return [max(0.5**idx, 0.0625) for idx in range(levels)]
        if self.weight_mode == "two":
            return [1.0 if idx == 0 else 0.5 for idx in range(levels)]
        # "same" and any unrecognised mode fall back to uniform weights
        return [1.0] * levels

    def get_loss(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Apply the base loss at one level, first shrinking ``target`` (nearest-neighbour
        interpolation) to the prediction's spatial size when the two disagree.
        Generally only the deeper levels (level > 0) need the downsizing.
        """
        if input.shape[2:] != target.shape[2:]:
            target = F.interpolate(target, size=input.shape[2:], mode=self.interp_mode)
        return self.loss(input, target)  # type: ignore[no-any-return]

    def forward(self, input: Union[None, torch.Tensor, list[torch.Tensor]], target: torch.Tensor) -> torch.Tensor:
        if isinstance(input, (list, tuple)):
            level_weights = self.get_weights(levels=len(input))
            total = torch.tensor(0, dtype=torch.float, device=target.device)
            for w, pred in zip(level_weights, input):
                total = total + w * self.get_loss(pred.float(), target)
            return total
        if input is None:
            raise ValueError("input shouldn't be None.")

        return self.loss(input.float(), target)  # type: ignore[no-any-return]
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# Short alias matching the module name, consistent with the other loss modules.
ds_loss = DeepSupervisionLoss
|
source_code/SegMamba/monai/losses/focal_loss.py
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import warnings
|
| 15 |
+
from collections.abc import Sequence
|
| 16 |
+
from typing import Optional
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn.functional as F
|
| 20 |
+
from torch.nn.modules.loss import _Loss
|
| 21 |
+
|
| 22 |
+
from monai.networks import one_hot
|
| 23 |
+
from monai.utils import LossReduction
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class FocalLoss(_Loss):
    """
    FocalLoss is an extension of BCEWithLogitsLoss that down-weights loss from
    high confidence correct predictions.

    Reimplementation of the Focal Loss described in:

        - ["Focal Loss for Dense Object Detection"](https://arxiv.org/abs/1708.02002), T. Lin et al., ICCV 2017
        - "AnatomyNet: Deep learning for fast and fully automated whole-volume segmentation of head and neck anatomy",
          Zhu et al., Medical Physics 2018

    Example:
        >>> import torch
        >>> from monai.losses import FocalLoss
        >>> from torch.nn import BCEWithLogitsLoss
        >>> shape = B, N, *DIMS = 2, 3, 5, 7, 11
        >>> input = torch.rand(*shape)
        >>> target = torch.rand(*shape)
        >>> # Demonstrate equivalence to BCE when gamma=0
        >>> fl_g0_criterion = FocalLoss(reduction='none', gamma=0)
        >>> fl_g0_loss = fl_g0_criterion(input, target)
        >>> bce_criterion = BCEWithLogitsLoss(reduction='none')
        >>> bce_loss = bce_criterion(input, target)
        >>> assert torch.allclose(fl_g0_loss, bce_loss)
        >>> # Demonstrate "focus" by setting gamma > 0.
        >>> fl_g2_criterion = FocalLoss(reduction='none', gamma=2)
        >>> fl_g2_loss = fl_g2_criterion(input, target)
        >>> # Mark easy and hard cases
        >>> is_easy = (target > 0.7) & (input > 0.7)
        >>> is_hard = (target > 0.7) & (input < 0.3)
        >>> easy_loss_g0 = fl_g0_loss[is_easy].mean()
        >>> hard_loss_g0 = fl_g0_loss[is_hard].mean()
        >>> easy_loss_g2 = fl_g2_loss[is_easy].mean()
        >>> hard_loss_g2 = fl_g2_loss[is_hard].mean()
        >>> # Gamma > 0 causes the loss function to "focus" on the hard
        >>> # cases. IE, easy cases are downweighted, so hard cases
        >>> # receive a higher proportion of the loss.
        >>> hard_to_easy_ratio_g2 = hard_loss_g2 / easy_loss_g2
        >>> hard_to_easy_ratio_g0 = hard_loss_g0 / easy_loss_g0
        >>> assert hard_to_easy_ratio_g2 > hard_to_easy_ratio_g0
    """

    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        gamma: float = 2.0,
        alpha: float | None = None,
        weight: Sequence[float] | float | int | torch.Tensor | None = None,
        reduction: LossReduction | str = LossReduction.MEAN,
        use_softmax: bool = False,
    ) -> None:
        """
        Args:
            include_background: if False, channel index 0 (background category) is excluded from the loss calculation.
                If False, `alpha` is invalid when using softmax.
            to_onehot_y: whether to convert the label `y` into the one-hot format. Defaults to False.
            gamma: value of the exponent gamma in the definition of the Focal loss. Defaults to 2.
            alpha: value of the alpha in the definition of the alpha-balanced Focal loss.
                The value should be in [0, 1]. Defaults to None.
            weight: weights to apply to the voxels of each class. If None no weights are applied.
                The input can be a single value (same weight for all classes), a sequence of values (the length
                of the sequence should be the same as the number of classes. If not ``include_background``,
                the number of classes should not include the background category class 0).
                The value/values should be no less than 0. Defaults to None.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.

            use_softmax: whether to use softmax to transform the original logits into probabilities.
                If True, softmax is used. If False, sigmoid is used. Defaults to False.

        Example:
            >>> import torch
            >>> from monai.losses import FocalLoss
            >>> pred = torch.tensor([[1, 0], [0, 1], [1, 0]], dtype=torch.float32)
            >>> grnd = torch.tensor([[0], [1], [0]], dtype=torch.int64)
            >>> fl = FocalLoss(to_onehot_y=True)
            >>> fl(pred, grnd)
        """
        # LossReduction(reduction) validates the string and normalises enum input.
        super().__init__(reduction=LossReduction(reduction).value)
        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        self.gamma = gamma
        self.alpha = alpha
        # keep the raw user-supplied value; the tensor form lives in the buffer below
        self.weight = weight
        self.use_softmax = use_softmax
        weight = torch.as_tensor(weight) if weight is not None else None
        # registered as a buffer so it follows the module across .to()/device moves
        # and is included in state_dict
        self.register_buffer("class_weight", weight)
        self.class_weight: None | torch.Tensor

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD], where N is the number of classes.
                The input should be the original logits since it will be transformed by
                a sigmoid/softmax in the forward function.
            target: the shape should be BNH[WD] or B1H[WD], where N is the number of classes.

        Raises:
            ValueError: When input and target (after one hot transform if set)
                have different shapes.
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
            ValueError: When ``self.weight`` is a sequence and the length is not equal to the
                number of classes.
            ValueError: When ``self.weight`` is/contains a value that is less than 0.

        """
        n_pred_ch = input.shape[1]

        if self.to_onehot_y:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
            else:
                target = one_hot(target, num_classes=n_pred_ch)

        if not self.include_background:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `include_background=False` ignored.")
            else:
                # if skipping background, removing first channel
                target = target[:, 1:]
                input = input[:, 1:]

        if target.shape != input.shape:
            raise ValueError(f"ground truth has different shape ({target.shape}) from input ({input.shape})")

        loss: Optional[torch.Tensor] = None
        input = input.float()
        target = target.float()
        if self.use_softmax:
            if not self.include_background and self.alpha is not None:
                # NOTE(review): this permanently mutates self.alpha on the first
                # forward call rather than ignoring it per-call — confirm intended.
                self.alpha = None
                warnings.warn("`include_background=False`, `alpha` ignored when using softmax.")
            loss = softmax_focal_loss(input, target, self.gamma, self.alpha)
        else:
            loss = sigmoid_focal_loss(input, target, self.gamma, self.alpha)

        num_of_classes = target.shape[1]
        if self.class_weight is not None and num_of_classes != 1:
            # make sure the lengths of weights are equal to the number of classes
            if self.class_weight.ndim == 0:
                # scalar weight: broadcast it into a per-class vector
                self.class_weight = torch.as_tensor([self.class_weight] * num_of_classes)
            else:
                if self.class_weight.shape[0] != num_of_classes:
                    raise ValueError(
                        """the length of the `weight` sequence should be the same as the number of classes.
                        If `include_background=False`, the weight should not include
                        the background category class 0."""
                    )
                if self.class_weight.min() < 0:
                    raise ValueError("the value/values of the `weight` should be no less than 0.")
            # apply class_weight to loss
            # NOTE(review): the registered buffer itself is replaced with a reshaped
            # view here, so its stored shape changes after the first forward call.
            self.class_weight = self.class_weight.to(loss)
            # reshape to (C, 1, ..., 1) so it broadcasts over the spatial dims
            broadcast_dims = [-1] + [1] * len(target.shape[2:])
            self.class_weight = self.class_weight.view(broadcast_dims)
            loss = self.class_weight * loss

        if self.reduction == LossReduction.SUM.value:
            # Previously there was a mean over the last dimension, which did not
            # return a compatible BCE loss. To maintain backwards compatible
            # behavior we have a flag that performs this extra step, disable or
            # parameterize if necessary. (Or justify why the mean should be there)
            average_spatial_dims = True
            if average_spatial_dims:
                loss = loss.mean(dim=list(range(2, len(target.shape))))
            loss = loss.sum()
        elif self.reduction == LossReduction.MEAN.value:
            loss = loss.mean()
        elif self.reduction == LossReduction.NONE.value:
            # keep the elementwise (or class-weighted) loss map as-is
            pass
        else:
            raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
        return loss
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def softmax_focal_loss(
    input: torch.Tensor, target: torch.Tensor, gamma: float = 2.0, alpha: Optional[float] = None
) -> torch.Tensor:
    """
    Multi-class focal loss on softmax probabilities.

    FL(pt) = -alpha * (1 - pt)**gamma * log(pt)

    where p_i = exp(s_i) / sum_j exp(s_j), t is the target (ground truth) class, and
    s_j is the unnormalized score for class j.
    """
    # log-probabilities over the class axis; exp() recovers the probabilities
    log_probs = input.log_softmax(1)
    focus = (1 - log_probs.exp()).pow(gamma)
    loss: torch.Tensor = -focus * log_probs * target

    if alpha is not None:
        # (1-alpha) for the background class (channel 0) and alpha for all others
        alpha_fac = torch.tensor([1 - alpha] + [alpha] * (target.shape[1] - 1)).to(loss)
        # reshape to (C, 1, ..., 1) so it broadcasts across the spatial dims
        alpha_fac = alpha_fac.view([-1] + [1] * len(target.shape[2:]))
        loss = alpha_fac * loss

    return loss
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def sigmoid_focal_loss(
    input: torch.Tensor, target: torch.Tensor, gamma: float = 2.0, alpha: Optional[float] = None
) -> torch.Tensor:
    """
    Per-channel binary focal loss on sigmoid probabilities.

    FL(pt) = -alpha * (1 - pt)**gamma * log(pt)

    where p = sigmoid(x), pt = p if label is 1 or 1 - p if label is 0
    """
    # numerically stable elementwise BCE-with-logits:
    # x - x*t - log(sigmoid(x)) == F.binary_cross_entropy_with_logits(x, t, reduction='none')
    # see https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/Loss.cpp#L363
    bce: torch.Tensor = input - input * target - F.logsigmoid(input)

    # log(1 - pt): logsigmoid(-x) when t == 1, logsigmoid(x) when t == 0,
    # computed in the log domain to reduce the chance of overflow
    log_one_minus_pt = F.logsigmoid(-input * (target * 2 - 1))
    # exp(gamma * log(1 - pt)) == (1 - pt) ** gamma — the focusing modulator
    loss = (log_one_minus_pt * gamma).exp() * bce

    if alpha is not None:
        # alpha where t == 1; (1 - alpha) where t == 0
        balance = target * alpha + (1 - target) * (1 - alpha)
        loss = balance * loss

    return loss
|
source_code/SegMamba/monai/losses/hausdorff_loss.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
# Hausdorff loss implementation based on paper:
|
| 13 |
+
# https://arxiv.org/pdf/1904.10030.pdf
|
| 14 |
+
|
| 15 |
+
# Repo: https://github.com/PatRyg99/HausdorffLoss
|
| 16 |
+
|
| 17 |
+
from __future__ import annotations
|
| 18 |
+
|
| 19 |
+
import warnings
|
| 20 |
+
from typing import Callable
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
from torch.nn.modules.loss import _Loss
|
| 24 |
+
|
| 25 |
+
from monai.networks import one_hot
|
| 26 |
+
from monai.transforms.utils import distance_transform_edt
|
| 27 |
+
from monai.utils import LossReduction
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class HausdorffDTLoss(_Loss):
    """
    Compute channel-wise binary Hausdorff loss based on distance transform. It can support both multi-classes and
    multi-labels tasks. The data `input` (BNHW[D] where N is number of classes) is compared with ground truth `target`
    (BNHW[D]).

    Note that axis N of `input` is expected to be logits or probabilities for each class, if passing logits as input,
    must set `sigmoid=True` or `softmax=True`, or specifying `other_act`. And the same axis of `target`
    can be 1 or N (one-hot format).

    The original paper: Karimi, D. et. al. (2019) Reducing the Hausdorff Distance in Medical Image Segmentation with
    Convolutional Neural Networks, IEEE Transactions on medical imaging, 39(2), 499-513
    """

    def __init__(
        self,
        alpha: float = 2.0,
        include_background: bool = False,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Callable | None = None,
        reduction: LossReduction | str = LossReduction.MEAN,
        batch: bool = False,
    ) -> None:
        """
        Args:
            alpha: exponent applied to the distance transforms before they weight the
                prediction error. Defaults to 2.0 as in the original paper.
            include_background: if False, channel index 0 (background category) is excluded from the calculation.
                if the non-background segmentations are small compared to the total image size they can get overwhelmed
                by the signal from the background so excluding it in such cases helps convergence.
            to_onehot_y: whether to convert the ``target`` into the one-hot format,
                using the number of classes inferred from `input` (``input.shape[1]``). Defaults to False.
            sigmoid: if True, apply a sigmoid function to the prediction.
            softmax: if True, apply a softmax function to the prediction.
            other_act: callable function to execute other activation layers, Defaults to ``None``. for example:
                ``other_act = torch.tanh``.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.
            batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
                Defaults to False, a loss value is computed independently from each item in the batch
                before any `reduction`.

        Raises:
            TypeError: When ``other_act`` is not an ``Optional[Callable]``.
            ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
                Incompatible values.

        """
        super().__init__(reduction=LossReduction(reduction).value)
        if other_act is not None and not callable(other_act):
            raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
        # Bug fix: `other_act` was previously omitted from this mutual-exclusion check even
        # though the docstring and the error message both promise it is included; exactly
        # one activation option (at most) may be supplied.
        if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
            raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")

        self.alpha = alpha
        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        self.sigmoid = sigmoid
        self.softmax = softmax
        self.other_act = other_act
        self.batch = batch

    @torch.no_grad()
    def distance_field(self, img: torch.Tensor) -> torch.Tensor:
        """Generate the combined foreground+background distance transform.

        Args:
            img: input mask as NCHWD or NCHW (a single channel per call).

        Returns:
            torch.Tensor: distance field; zero for any batch item whose mask is
            entirely foreground or entirely background.
        """
        field = torch.zeros_like(img)

        for batch_idx in range(len(img)):
            fg_mask = img[batch_idx] > 0.5

            # For cases where the mask is entirely background or entirely foreground
            # the distance transform is not well defined for all 1s,
            # which always would happen on either foreground or background, so skip
            if fg_mask.any() and not fg_mask.all():
                fg_dist: torch.Tensor = distance_transform_edt(fg_mask)  # type: ignore
                bg_mask = ~fg_mask
                bg_dist: torch.Tensor = distance_transform_edt(bg_mask)  # type: ignore

                field[batch_idx] = fg_dist + bg_dist

        return field

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNHW[D], where N is the number of classes.
            target: the shape should be BNHW[D] or B1HW[D], where N is the number of classes.

        Raises:
            ValueError: If the input is not 2D (NCHW) or 3D (NCHWD).
            AssertionError: When input and target (after one hot transform if set)
                have different shapes.
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].

        Example:
            >>> import torch
            >>> from monai.losses.hausdorff_loss import HausdorffDTLoss
            >>> from monai.networks.utils import one_hot
            >>> B, C, H, W = 7, 5, 3, 2
            >>> input = torch.rand(B, C, H, W)
            >>> target_idx = torch.randint(low=0, high=C - 1, size=(B, H, W)).long()
            >>> target = one_hot(target_idx[:, None, ...], num_classes=C)
            >>> self = HausdorffDTLoss(reduction='none')
            >>> loss = self(input, target)
            >>> assert np.broadcast_shapes(loss.shape, input.shape) == input.shape
        """
        if input.dim() != 4 and input.dim() != 5:
            raise ValueError("Only 2D (NCHW) and 3D (NCHWD) supported")

        if self.sigmoid:
            input = torch.sigmoid(input)

        n_pred_ch = input.shape[1]
        if self.softmax:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `softmax=True` ignored.")
            else:
                input = torch.softmax(input, 1)

        if self.other_act is not None:
            input = self.other_act(input)

        if self.to_onehot_y:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
            else:
                target = one_hot(target, num_classes=n_pred_ch)

        if not self.include_background:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `include_background=False` ignored.")
            else:
                # If skipping background, removing first channel
                target = target[:, 1:]
                input = input[:, 1:]

        if target.shape != input.shape:
            raise AssertionError(f"ground truth has different shape ({target.shape}) from input ({input.shape})")

        device = input.device
        all_f = []
        for i in range(input.shape[1]):
            # one channel at a time: the distance transform is a binary-mask operation
            ch_input = input[:, [i]]
            ch_target = target[:, [i]]
            # distance fields are treated as constants — no gradient flows through them
            pred_dt = self.distance_field(ch_input.detach()).float()
            target_dt = self.distance_field(ch_target.detach()).float()

            pred_error = (ch_input - ch_target) ** 2
            distance = pred_dt**self.alpha + target_dt**self.alpha

            running_f = pred_error * distance.to(device)
            reduce_axis: list[int] = torch.arange(2, len(input.shape)).tolist()
            if self.batch:
                # reducing spatial dimensions and batch
                reduce_axis = [0] + reduce_axis
            all_f.append(running_f.mean(dim=reduce_axis, keepdim=True))
        f = torch.cat(all_f, dim=1)
        if self.reduction == LossReduction.MEAN.value:
            f = torch.mean(f)  # the batch and channel average
        elif self.reduction == LossReduction.SUM.value:
            f = torch.sum(f)  # sum over the batch and channel dims
        elif self.reduction == LossReduction.NONE.value:
            # If we are not computing voxelwise loss components at least make sure a none reduction
            # maintains a broadcastable shape (was computed from the loop variable `ch_input`;
            # `input` has the same rank and avoids referencing a loop variable after the loop)
            broadcast_shape = list(f.shape[0:2]) + [1] * (len(input.shape) - 2)
            f = f.view(broadcast_shape)
        else:
            raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')

        return f
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class LogHausdorffDTLoss(HausdorffDTLoss):
    """
    Compute the logarithm of the Hausdorff Distance Transform Loss.

    This class computes the logarithm of the Hausdorff Distance Transform Loss, which is based on the distance
    transform. The logarithm is computed to potentially stabilize and scale the loss values, especially when the
    original loss values are very small.

    The formula for the loss is given by:
        log_loss = log(HausdorffDTLoss + 1)

    Inherits from the HausdorffDTLoss class to utilize its distance transform computation.
    """

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Compute the logarithm of the Hausdorff Distance Transform Loss.

        Args:
            input (torch.Tensor): The shape should be BNHW[D], where N is the number of classes.
            target (torch.Tensor): The shape should be BNHW[D] or B1HW[D], where N is the number of classes.

        Returns:
            torch.Tensor: The computed Log Hausdorff Distance Transform Loss for the given input and target.

        Raises:
            Any exceptions raised by the parent class HausdorffDTLoss.
        """
        base_loss = super().forward(input, target)
        # shift by 1 before the log so a zero loss maps to a zero log-loss
        scaled: torch.Tensor = torch.log(base_loss + 1)
        return scaled
|
source_code/SegMamba/monai/losses/image_dissimilarity.py
ADDED
|
@@ -0,0 +1,329 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
from torch.nn import functional as F
|
| 16 |
+
from torch.nn.modules.loss import _Loss
|
| 17 |
+
|
| 18 |
+
from monai.networks.layers import gaussian_1d, separable_filtering
|
| 19 |
+
from monai.utils import LossReduction
|
| 20 |
+
from monai.utils.module import look_up_option
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def make_rectangular_kernel(kernel_size: int) -> torch.Tensor:
    """Return a flat (box-car) 1-D kernel: ``kernel_size`` ones."""
    return torch.full((kernel_size,), 1.0)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def make_triangular_kernel(kernel_size: int) -> torch.Tensor:
    """Build a triangular 1-D kernel by convolving a box filter with itself."""
    # width of the intermediate box filter; forced odd so the result is symmetric
    box_width = (kernel_size + 1) // 2
    if box_width % 2 == 0:
        box_width -= 1
    box = torch.full((1, 1, box_width), 1.0 / box_width)
    # pad so the self-convolution comes out at exactly ``kernel_size`` taps
    pad = (kernel_size - box_width) // 2 + box_width // 2
    return F.conv1d(box, box, padding=pad).reshape(-1)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def make_gaussian_kernel(kernel_size: int) -> torch.Tensor:
    """Build a 1-D sampled Gaussian kernel truncated to ``kernel_size`` taps."""
    sigma = torch.tensor(kernel_size / 3.0)
    # 2.5066282 ~= sqrt(2*pi); multiplying by sqrt(2*pi)*sigma undoes the
    # Gaussian's normalizing constant so the peak value is 1.
    scale = 2.5066282 * sigma
    kernel = gaussian_1d(sigma=sigma, truncated=kernel_size // 2, approx="sampled", normalize=False) * scale
    return kernel[:kernel_size]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# registry of supported smoothing-window types -> factory functions,
# consumed by LocalNormalizedCrossCorrelationLoss via look_up_option
kernel_dict = {
    "rectangular": make_rectangular_kernel,
    "triangular": make_triangular_kernel,
    "gaussian": make_gaussian_kernel,
}
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class LocalNormalizedCrossCorrelationLoss(_Loss):
    """
    Local squared zero-normalized cross-correlation.
    The loss is based on a moving kernel/window over the y_true/y_pred,
    within the window the square of zncc is calculated.
    The kernel can be a rectangular / triangular / gaussian window.
    The final loss is the averaged loss over all windows, negated so that
    maximizing correlation minimizes the loss.

    Adapted from:
        https://github.com/voxelmorph/voxelmorph/blob/legacy/src/losses.py
        DeepReg (https://github.com/DeepRegNet/DeepReg)
    """

    def __init__(
        self,
        spatial_dims: int = 3,
        kernel_size: int = 3,
        kernel_type: str = "rectangular",
        reduction: LossReduction | str = LossReduction.MEAN,
        smooth_nr: float = 0.0,
        smooth_dr: float = 1e-5,
    ) -> None:
        """
        Args:
            spatial_dims: number of spatial dimensions, {``1``, ``2``, ``3``}. Defaults to 3.
            kernel_size: kernel spatial size, must be odd.
            kernel_type: {``"rectangular"``, ``"triangular"``, ``"gaussian"``}. Defaults to ``"rectangular"``.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.
            smooth_nr: a small constant added to the numerator to avoid nan.
            smooth_dr: a small constant added to the denominator to avoid nan.

        Raises:
            ValueError: when ``spatial_dims`` is not in {1, 2, 3} or ``kernel_size`` is even.
        """
        super().__init__(reduction=LossReduction(reduction).value)

        self.ndim = spatial_dims
        if self.ndim not in {1, 2, 3}:
            raise ValueError(f"Unsupported ndim: {self.ndim}-d, only 1-d, 2-d, and 3-d inputs are supported")

        self.kernel_size = kernel_size
        if self.kernel_size % 2 == 0:
            raise ValueError(f"kernel_size must be odd, got {self.kernel_size}")

        _kernel = look_up_option(kernel_type, kernel_dict)
        self.kernel = _kernel(self.kernel_size)
        # fix: the original set ``self.kernel.require_grads`` — a typo that merely
        # attached an unused attribute; ``requires_grad`` is the real tensor flag.
        self.kernel.requires_grad = False
        self.kernel_vol = self.get_kernel_vol()

        self.smooth_nr = float(smooth_nr)
        self.smooth_dr = float(smooth_dr)

    def get_kernel_vol(self):
        # outer-product the 1-d kernel with itself (ndim - 1) times, then sum
        # all entries: this is the total weight "volume" of the moving window.
        vol = self.kernel
        for _ in range(self.ndim - 1):
            vol = torch.matmul(vol.unsqueeze(-1), self.kernel.unsqueeze(0))
        return torch.sum(vol)

    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            pred: the shape should be BNH[WD].
            target: the shape should be BNH[WD].
        Raises:
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"],
                or when the input shapes are inconsistent.
        """
        if pred.ndim - 2 != self.ndim:
            raise ValueError(f"expecting pred with {self.ndim} spatial dimensions, got pred of shape {pred.shape}")
        if target.shape != pred.shape:
            raise ValueError(f"ground truth has differing shape ({target.shape}) from pred ({pred.shape})")

        t2, p2, tp = target * target, pred * pred, target * pred
        kernel, kernel_vol = self.kernel.to(pred), self.kernel_vol.to(pred)
        kernels = [kernel] * self.ndim
        # windowed sums computed via separable filtering along each spatial axis
        t_sum = separable_filtering(target, kernels=kernels)
        p_sum = separable_filtering(pred, kernels=kernels)
        t2_sum = separable_filtering(t2, kernels=kernels)
        p2_sum = separable_filtering(p2, kernels=kernels)
        tp_sum = separable_filtering(tp, kernels=kernels)

        # windowed means
        t_avg = t_sum / kernel_vol
        p_avg = p_sum / kernel_vol

        # normalized cross correlation between t and p:
        # sum[(t - mean[t]) * (p - mean[p])] / std[t] / std[p]
        # denoted by num / denom; assume we sum over N values:
        # num = sum[t * p - mean[t] * p - t * mean[p] + mean[t] * mean[p]]
        #     = sum[t*p] - sum[t] * sum[p] / N * 2 + sum[t] * sum[p] / N
        #     = sum[t*p] - sum[t] * mean[p] = cross
        # the following is actually the squared ncc
        cross = tp_sum - p_avg * t_sum
        # clamp variances from below with smooth_dr so the denominator cannot vanish
        t_var = torch.max(
            t2_sum - t_avg * t_sum, torch.as_tensor(self.smooth_dr, dtype=t2_sum.dtype, device=t2_sum.device)
        )
        p_var = torch.max(
            p2_sum - p_avg * p_sum, torch.as_tensor(self.smooth_dr, dtype=p2_sum.dtype, device=p2_sum.device)
        )
        ncc: torch.Tensor = (cross * cross + self.smooth_nr) / (t_var * p_var)

        # negate so that higher correlation yields a lower loss
        if self.reduction == LossReduction.SUM.value:
            return torch.sum(ncc).neg()  # sum over the batch, channel and spatial ndims
        if self.reduction == LossReduction.NONE.value:
            return ncc.neg()
        if self.reduction == LossReduction.MEAN.value:
            return torch.mean(ncc).neg()  # average over the batch, channel and spatial ndims
        raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class GlobalMutualInformationLoss(_Loss):
    """
    Differentiable global mutual information loss via Parzen windowing method.

    Reference:
        https://dspace.mit.edu/handle/1721.1/123142, Section 3.1, equation 3.1-3.5, Algorithm 1
    """

    def __init__(
        self,
        kernel_type: str = "gaussian",
        num_bins: int = 23,
        sigma_ratio: float = 0.5,
        reduction: LossReduction | str = LossReduction.MEAN,
        smooth_nr: float = 1e-7,
        smooth_dr: float = 1e-7,
    ) -> None:
        """
        Args:
            kernel_type: {``"gaussian"``, ``"b-spline"``}
                ``"gaussian"``: adapted from DeepReg
                Reference: https://dspace.mit.edu/handle/1721.1/123142, Section 3.1, equation 3.1-3.5, Algorithm 1.
                ``"b-spline"``: based on the method of Mattes et al [1,2] and adapted from ITK
                References:
                  [1] "Nonrigid multimodality image registration"
                      D. Mattes, D. R. Haynor, H. Vesselle, T. Lewellen and W. Eubank
                      Medical Imaging 2001: Image Processing, 2001, pp. 1609-1620.
                  [2] "PET-CT Image Registration in the Chest Using Free-form Deformations"
                      D. Mattes, D. R. Haynor, H. Vesselle, T. Lewellen and W. Eubank
                      IEEE Transactions in Medical Imaging. Vol.22, No.1,
                      January 2003. pp.120-128.

            num_bins: number of bins for intensity
            sigma_ratio: a hyper param for gaussian function
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.
            smooth_nr: a small constant added to the numerator to avoid nan.
            smooth_dr: a small constant added to the denominator to avoid nan.
        """
        super().__init__(reduction=LossReduction(reduction).value)
        if num_bins <= 0:
            # fix: the original message lacked the f-prefix, so "{num_bins}" was
            # emitted literally instead of the offending value
            raise ValueError(f"num_bins must > 0, got {num_bins}")
        bin_centers = torch.linspace(0.0, 1.0, num_bins)  # (num_bins,)
        sigma = torch.mean(bin_centers[1:] - bin_centers[:-1]) * sigma_ratio
        # look_up_option validates the choice and returns it; the original code
        # redundantly overwrote the attribute with the raw argument afterwards
        self.kernel_type = look_up_option(kernel_type, ["gaussian", "b-spline"])
        self.num_bins = num_bins
        if self.kernel_type == "gaussian":
            self.preterm = 1 / (2 * sigma**2)
            self.bin_centers = bin_centers[None, None, ...]
        self.smooth_nr = float(smooth_nr)
        self.smooth_dr = float(smooth_dr)

    def parzen_windowing(
        self, pred: torch.Tensor, target: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Dispatch to the kernel-specific Parzen windowing for both pred and target."""
        if self.kernel_type == "gaussian":
            pred_weight, pred_probability = self.parzen_windowing_gaussian(pred)
            target_weight, target_probability = self.parzen_windowing_gaussian(target)
        elif self.kernel_type == "b-spline":
            # a third order BSpline kernel is used for the pred image intensity PDF.
            pred_weight, pred_probability = self.parzen_windowing_b_spline(pred, order=3)
            # a zero order (box car) BSpline kernel is used for the target image intensity PDF.
            target_weight, target_probability = self.parzen_windowing_b_spline(target, order=0)
        else:
            # fix: the original raised a bare ValueError with no message
            raise ValueError(f"Unsupported kernel_type: {self.kernel_type}")
        return pred_weight, pred_probability, target_weight, target_probability

    def parzen_windowing_b_spline(self, img: torch.Tensor, order: int) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Parzen windowing with b-spline kernel (adapted from ITK)

        Args:
            img: the shape should be B[NDHW].
            order: b-spline order, 0 (box car) or 3 (cubic).

        Raises:
            ValueError: when ``order`` is not 0 or 3.
        """

        # Compute binsize for the histograms.
        #
        # The binsize for the image intensities needs to be adjusted so that
        # we can avoid dealing with boundary conditions using the cubic
        # spline as the Parzen window. We do this by increasing the size
        # of the bins so that the joint histogram becomes "padded" at the
        # borders. Because we are changing the binsize,
        # we also need to shift the minimum by the padded amount in order to
        # avoid minimum values filling in our padded region.
        #
        # Note that there can still be non-zero bin values in the padded region,
        # it's just that these bins will never be a central bin for the Parzen
        # window.
        _max, _min = torch.max(img), torch.min(img)
        padding = 2
        bin_size = (_max - _min) / (self.num_bins - 2 * padding)
        norm_min = torch.div(_min, bin_size) - padding

        # assign bin/window index to each voxel
        window_term = torch.div(img, bin_size) - norm_min  # B[NDHW]
        # make sure the extreme values are in valid (non-padded) bins
        window_term = torch.clamp(window_term, padding, self.num_bins - padding - 1)  # B[NDHW]
        window_term = window_term.reshape(window_term.shape[0], -1, 1)  # (batch, num_sample, 1)
        bins = torch.arange(self.num_bins, device=window_term.device).reshape(1, 1, -1)  # (1, 1, num_bins)
        sample_bin_matrix = torch.abs(bins - window_term)  # (batch, num_sample, num_bins)

        # b-spline kernel
        # (4 - 6 * abs ** 2 + 3 * abs ** 3) / 6 when 0 <= abs < 1
        # (2 - abs) ** 3 / 6 when 1 <= abs < 2
        weight = torch.zeros_like(sample_bin_matrix, dtype=torch.float)  # (batch, num_sample, num_bins)
        if order == 0:
            weight = weight + (sample_bin_matrix < 0.5) + (sample_bin_matrix == 0.5) * 0.5
        elif order == 3:
            weight = weight + (4 - 6 * sample_bin_matrix**2 + 3 * sample_bin_matrix**3) * (sample_bin_matrix < 1) / 6
            weight = weight + (2 - sample_bin_matrix) ** 3 * (sample_bin_matrix >= 1) * (sample_bin_matrix < 2) / 6
        else:
            raise ValueError(f"Do not support b-spline {order}-order parzen windowing")

        weight = weight / torch.sum(weight, dim=-1, keepdim=True)  # (batch, num_sample, num_bins)
        probability = torch.mean(weight, dim=-2, keepdim=True)  # (batch, 1, num_bins)
        return weight, probability

    def parzen_windowing_gaussian(self, img: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Parzen windowing with gaussian kernel (adapted from DeepReg implementation)
        Note: the input is expected to range between 0 and 1
        Args:
            img: the shape should be B[NDHW].
        """
        img = torch.clamp(img, 0, 1)
        img = img.reshape(img.shape[0], -1, 1)  # (batch, num_sample, 1)
        weight = torch.exp(
            -self.preterm.to(img) * (img - self.bin_centers.to(img)) ** 2
        )  # (batch, num_sample, num_bin)
        weight = weight / torch.sum(weight, dim=-1, keepdim=True)  # (batch, num_sample, num_bin)
        probability = torch.mean(weight, dim=-2, keepdim=True)  # (batch, 1, num_bin)
        return weight, probability

    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            pred: the shape should be B[NDHW].
            target: the shape should be same as the pred shape.
        Raises:
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"],
                or when pred and target shapes differ.
        """
        if target.shape != pred.shape:
            raise ValueError(f"ground truth has differing shape ({target.shape}) from pred ({pred.shape})")
        wa, pa, wb, pb = self.parzen_windowing(pred, target)  # (batch, num_sample, num_bin), (batch, 1, num_bin)

        # joint distribution and product of marginals
        pab = torch.bmm(wa.permute(0, 2, 1), wb.to(wa)).div(wa.shape[1])  # (batch, num_bins, num_bins)
        papb = torch.bmm(pa.permute(0, 2, 1), pb.to(pa))  # (batch, num_bins, num_bins)
        mi = torch.sum(
            pab * torch.log((pab + self.smooth_nr) / (papb + self.smooth_dr) + self.smooth_dr), dim=(1, 2)
        )  # (batch)

        # negate: maximizing mutual information minimizes the loss
        if self.reduction == LossReduction.SUM.value:
            return torch.sum(mi).neg()  # sum over the batch and channel ndims
        if self.reduction == LossReduction.NONE.value:
            return mi.neg()
        if self.reduction == LossReduction.MEAN.value:
            return torch.mean(mi).neg()  # average over the batch and channel ndims
        raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
|
source_code/SegMamba/monai/losses/multi_scale.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
from torch.nn.modules.loss import _Loss
|
| 16 |
+
|
| 17 |
+
from monai.networks.layers import gaussian_1d, separable_filtering
|
| 18 |
+
from monai.utils import LossReduction
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def make_gaussian_kernel(sigma: int) -> torch.Tensor:
    """Return a sampled (unnormalized) 1-D Gaussian kernel for the given sigma."""
    if sigma > 0:
        return gaussian_1d(sigma=torch.tensor(sigma), truncated=3, approx="sampled", normalize=False)
    raise ValueError(f"expecting positive sigma, got sigma={sigma}")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def make_cauchy_kernel(sigma: int) -> torch.Tensor:
    """Return a normalized 1-D Cauchy (Lorentzian) kernel for the given sigma.

    The kernel spans offsets -5*sigma .. +5*sigma and sums to 1.
    """
    if sigma <= 0:
        raise ValueError(f"expecting positive sigma, got sigma={sigma}")
    half_width = int(sigma * 5)
    # Lorentzian profile: 1 / ((x / sigma)^2 + 1) at integer offsets
    denominators = [(offset / sigma) ** 2 + 1 for offset in range(-half_width, half_width + 1)]
    kernel = torch.reciprocal(torch.tensor(denominators))
    return kernel / torch.sum(kernel)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# maps kernel name -> kernel factory; consumed by MultiScaleLoss.__init__
kernel_fn_dict = {"gaussian": make_gaussian_kernel, "cauchy": make_cauchy_kernel}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class MultiScaleLoss(_Loss):
    """
    This is a wrapper class.
    It smooths the input and target at different scales before passing them into the wrapped loss function.

    Adapted from:
        DeepReg (https://github.com/DeepRegNet/DeepReg)
    """

    def __init__(
        self,
        loss: _Loss,
        scales: list | None = None,
        kernel: str = "gaussian",
        reduction: LossReduction | str = LossReduction.MEAN,
    ) -> None:
        """
        Args:
            loss: loss function to be wrapped
            scales: list of scalars or None, if None, do not apply any scaling.
            kernel: gaussian or cauchy.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.

        Raises:
            ValueError: when ``kernel`` is not "gaussian" or "cauchy".
        """
        super().__init__(reduction=LossReduction(reduction).value)
        if kernel not in kernel_fn_dict:
            raise ValueError(f"got unsupported kernel type: {kernel}", "only support gaussian and cauchy")
        self.kernel_fn = kernel_fn_dict[kernel]
        self.loss = loss
        self.scales = scales

    def forward(self, y_true: torch.Tensor, y_pred: torch.Tensor) -> torch.Tensor:
        """
        Compute the wrapped loss at every configured scale and reduce.

        Args:
            y_true: ground truth tensor, shape B[N spatial...].
            y_pred: prediction tensor, same shape as ``y_true``.

        Raises:
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
        """
        if self.scales is None:
            loss: torch.Tensor = self.loss(y_pred, y_true)
        else:
            loss_list = []
            for s in self.scales:
                if s == 0:
                    # no smoothing at scale 0
                    loss_list.append(self.loss(y_pred, y_true))
                else:
                    # build the smoothing kernel once per scale (it is identical for
                    # pred and target) instead of recomputing it for each argument
                    kernels = [self.kernel_fn(s).to(y_pred)] * (y_true.ndim - 2)
                    loss_list.append(
                        self.loss(
                            separable_filtering(y_pred, kernels=kernels),
                            separable_filtering(y_true, kernels=kernels),
                        )
                    )
            loss = torch.stack(loss_list, dim=0)

        if self.reduction == LossReduction.MEAN.value:
            loss = torch.mean(loss)  # the batch and channel average
        elif self.reduction == LossReduction.SUM.value:
            loss = torch.sum(loss)  # sum over the batch and channel dims
        elif self.reduction != LossReduction.NONE.value:
            raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')

        return loss
|
source_code/SegMamba/monai/losses/perceptual.py
ADDED
|
@@ -0,0 +1,437 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import torch.nn as nn
|
| 18 |
+
|
| 19 |
+
from monai.utils import optional_import
|
| 20 |
+
from monai.utils.enums import StrEnum
|
| 21 |
+
|
| 22 |
+
LPIPS, _ = optional_import("lpips", name="LPIPS")
|
| 23 |
+
torchvision, _ = optional_import("torchvision")
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class PercetualNetworkType(StrEnum):
    """Names of the pretrained backbones accepted by ``PerceptualLoss``.

    NOTE(review): the class name misspells "Perceptual"; it is kept as-is
    because external callers may already reference this identifier.
    """

    alex = "alex"
    vgg = "vgg"
    squeeze = "squeeze"
    radimagenet_resnet50 = "radimagenet_resnet50"
    medicalnet_resnet10_23datasets = "medicalnet_resnet10_23datasets"
    medicalnet_resnet50_23datasets = "medicalnet_resnet50_23datasets"
    resnet50 = "resnet50"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class PerceptualLoss(nn.Module):
|
| 37 |
+
"""
|
| 38 |
+
Perceptual loss using features from pretrained deep neural networks trained. The function supports networks
|
| 39 |
+
pretrained on: ImageNet that use the LPIPS approach from Zhang, et al. "The unreasonable effectiveness of deep
|
| 40 |
+
features as a perceptual metric." https://arxiv.org/abs/1801.03924 ; RadImagenet from Mei, et al. "RadImageNet: An
|
| 41 |
+
Open Radiologic Deep Learning Research Dataset for Effective Transfer Learning"
|
| 42 |
+
https://pubs.rsna.org/doi/full/10.1148/ryai.210315 ; MedicalNet from Chen et al. "Med3D: Transfer Learning for
|
| 43 |
+
3D Medical Image Analysis" https://arxiv.org/abs/1904.00625 ;
|
| 44 |
+
and ResNet50 from Torchvision: https://pytorch.org/vision/main/models/generated/torchvision.models.resnet50.html .
|
| 45 |
+
|
| 46 |
+
The fake 3D implementation is based on a 2.5D approach where we calculate the 2D perceptual loss on slices from all
|
| 47 |
+
three axes and average. The full 3D approach uses a 3D network to calculate the perceptual loss.
|
| 48 |
+
MedicalNet networks are only compatible with 3D inputs and support channel-wise loss.
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
spatial_dims: number of spatial dimensions.
|
| 52 |
+
network_type: {``"alex"``, ``"vgg"``, ``"squeeze"``, ``"radimagenet_resnet50"``,
|
| 53 |
+
``"medicalnet_resnet10_23datasets"``, ``"medicalnet_resnet50_23datasets"``, ``"resnet50"``}
|
| 54 |
+
Specifies the network architecture to use. Defaults to ``"alex"``.
|
| 55 |
+
is_fake_3d: if True use 2.5D approach for a 3D perceptual loss.
|
| 56 |
+
fake_3d_ratio: ratio of how many slices per axis are used in the 2.5D approach.
|
| 57 |
+
cache_dir: path to cache directory to save the pretrained network weights.
|
| 58 |
+
pretrained: whether to load pretrained weights. This argument only works when using networks from
|
| 59 |
+
LIPIS or Torchvision. Defaults to ``"True"``.
|
| 60 |
+
pretrained_path: if `pretrained` is `True`, users can specify a weights file to be loaded
|
| 61 |
+
via using this argument. This argument only works when ``"network_type"`` is "resnet50".
|
| 62 |
+
Defaults to `None`.
|
| 63 |
+
pretrained_state_dict_key: if `pretrained_path` is not `None`, this argument is used to
|
| 64 |
+
extract the expected state dict. This argument only works when ``"network_type"`` is "resnet50".
|
| 65 |
+
Defaults to `None`.
|
| 66 |
+
channel_wise: if True, the loss is returned per channel. Otherwise the loss is averaged over the channels.
|
| 67 |
+
Defaults to ``False``.
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
def __init__(
|
| 71 |
+
self,
|
| 72 |
+
spatial_dims: int,
|
| 73 |
+
network_type: str = PercetualNetworkType.alex,
|
| 74 |
+
is_fake_3d: bool = True,
|
| 75 |
+
fake_3d_ratio: float = 0.5,
|
| 76 |
+
cache_dir: str | None = None,
|
| 77 |
+
pretrained: bool = True,
|
| 78 |
+
pretrained_path: str | None = None,
|
| 79 |
+
pretrained_state_dict_key: str | None = None,
|
| 80 |
+
channel_wise: bool = False,
|
| 81 |
+
):
|
| 82 |
+
super().__init__()
|
| 83 |
+
|
| 84 |
+
if spatial_dims not in [2, 3]:
|
| 85 |
+
raise NotImplementedError("Perceptual loss is implemented only in 2D and 3D.")
|
| 86 |
+
|
| 87 |
+
if (spatial_dims == 2 or is_fake_3d) and "medicalnet_" in network_type:
|
| 88 |
+
raise ValueError(
|
| 89 |
+
"MedicalNet networks are only compatible with ``spatial_dims=3``."
|
| 90 |
+
"Argument is_fake_3d must be set to False."
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
if channel_wise and "medicalnet_" not in network_type:
|
| 94 |
+
raise ValueError("Channel-wise loss is only compatible with MedicalNet networks.")
|
| 95 |
+
|
| 96 |
+
if network_type.lower() not in list(PercetualNetworkType):
|
| 97 |
+
raise ValueError(
|
| 98 |
+
"Unrecognised criterion entered for Adversarial Loss. Must be one in: %s"
|
| 99 |
+
% ", ".join(PercetualNetworkType)
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
if cache_dir:
|
| 103 |
+
torch.hub.set_dir(cache_dir)
|
| 104 |
+
# raise a warning that this may change the default cache dir for all torch.hub calls
|
| 105 |
+
warnings.warn(
|
| 106 |
+
f"Setting cache_dir to {cache_dir}, this may change the default cache dir for all torch.hub calls."
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
self.spatial_dims = spatial_dims
|
| 110 |
+
self.perceptual_function: nn.Module
|
| 111 |
+
if spatial_dims == 3 and is_fake_3d is False:
|
| 112 |
+
self.perceptual_function = MedicalNetPerceptualSimilarity(
|
| 113 |
+
net=network_type, verbose=False, channel_wise=channel_wise
|
| 114 |
+
)
|
| 115 |
+
elif "radimagenet_" in network_type:
|
| 116 |
+
self.perceptual_function = RadImageNetPerceptualSimilarity(net=network_type, verbose=False)
|
| 117 |
+
elif network_type == "resnet50":
|
| 118 |
+
self.perceptual_function = TorchvisionModelPerceptualSimilarity(
|
| 119 |
+
net=network_type,
|
| 120 |
+
pretrained=pretrained,
|
| 121 |
+
pretrained_path=pretrained_path,
|
| 122 |
+
pretrained_state_dict_key=pretrained_state_dict_key,
|
| 123 |
+
)
|
| 124 |
+
else:
|
| 125 |
+
self.perceptual_function = LPIPS(pretrained=pretrained, net=network_type, verbose=False)
|
| 126 |
+
self.is_fake_3d = is_fake_3d
|
| 127 |
+
self.fake_3d_ratio = fake_3d_ratio
|
| 128 |
+
self.channel_wise = channel_wise
|
| 129 |
+
|
| 130 |
+
def _calculate_axis_loss(self, input: torch.Tensor, target: torch.Tensor, spatial_axis: int) -> torch.Tensor:
|
| 131 |
+
"""
|
| 132 |
+
Calculate perceptual loss in one of the axis used in the 2.5D approach. After the slices of one spatial axis
|
| 133 |
+
is transformed into different instances in the batch, we compute the loss using the 2D approach.
|
| 134 |
+
|
| 135 |
+
Args:
|
| 136 |
+
input: input 5D tensor. BNHWD
|
| 137 |
+
target: target 5D tensor. BNHWD
|
| 138 |
+
spatial_axis: spatial axis to obtain the 2D slices.
|
| 139 |
+
"""
|
| 140 |
+
|
| 141 |
+
def batchify_axis(x: torch.Tensor, fake_3d_perm: tuple) -> torch.Tensor:
|
| 142 |
+
"""
|
| 143 |
+
Transform slices from one spatial axis into different instances in the batch.
|
| 144 |
+
"""
|
| 145 |
+
slices = x.float().permute((0,) + fake_3d_perm).contiguous()
|
| 146 |
+
slices = slices.view(-1, x.shape[fake_3d_perm[1]], x.shape[fake_3d_perm[2]], x.shape[fake_3d_perm[3]])
|
| 147 |
+
|
| 148 |
+
return slices
|
| 149 |
+
|
| 150 |
+
preserved_axes = [2, 3, 4]
|
| 151 |
+
preserved_axes.remove(spatial_axis)
|
| 152 |
+
|
| 153 |
+
channel_axis = 1
|
| 154 |
+
input_slices = batchify_axis(x=input, fake_3d_perm=(spatial_axis, channel_axis) + tuple(preserved_axes))
|
| 155 |
+
indices = torch.randperm(input_slices.shape[0])[: int(input_slices.shape[0] * self.fake_3d_ratio)].to(
|
| 156 |
+
input_slices.device
|
| 157 |
+
)
|
| 158 |
+
input_slices = torch.index_select(input_slices, dim=0, index=indices)
|
| 159 |
+
target_slices = batchify_axis(x=target, fake_3d_perm=(spatial_axis, channel_axis) + tuple(preserved_axes))
|
| 160 |
+
target_slices = torch.index_select(target_slices, dim=0, index=indices)
|
| 161 |
+
|
| 162 |
+
axis_loss = torch.mean(self.perceptual_function(input_slices, target_slices))
|
| 163 |
+
|
| 164 |
+
return axis_loss
|
| 165 |
+
|
| 166 |
+
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
|
| 167 |
+
"""
|
| 168 |
+
Args:
|
| 169 |
+
input: the shape should be BNHW[D].
|
| 170 |
+
target: the shape should be BNHW[D].
|
| 171 |
+
"""
|
| 172 |
+
if target.shape != input.shape:
|
| 173 |
+
raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
|
| 174 |
+
|
| 175 |
+
if self.spatial_dims == 3 and self.is_fake_3d:
|
| 176 |
+
# Compute 2.5D approach
|
| 177 |
+
loss_sagittal = self._calculate_axis_loss(input, target, spatial_axis=2)
|
| 178 |
+
loss_coronal = self._calculate_axis_loss(input, target, spatial_axis=3)
|
| 179 |
+
loss_axial = self._calculate_axis_loss(input, target, spatial_axis=4)
|
| 180 |
+
loss = loss_sagittal + loss_axial + loss_coronal
|
| 181 |
+
else:
|
| 182 |
+
# 2D and real 3D cases
|
| 183 |
+
loss = self.perceptual_function(input, target)
|
| 184 |
+
|
| 185 |
+
if self.channel_wise:
|
| 186 |
+
loss = torch.mean(loss.squeeze(), dim=0)
|
| 187 |
+
else:
|
| 188 |
+
loss = torch.mean(loss)
|
| 189 |
+
|
| 190 |
+
return loss
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
class MedicalNetPerceptualSimilarity(nn.Module):
    """
    Component to perform the perceptual evaluation with the networks pretrained by Chen, et al. "Med3D: Transfer
    Learning for 3D Medical Image Analysis". This class uses torch Hub to download the networks from
    "Warvito/MedicalNet-models".

    Args:
        net: {``"medicalnet_resnet10_23datasets"``, ``"medicalnet_resnet50_23datasets"``}
            Specifies the network architecture to use. Defaults to ``"medicalnet_resnet10_23datasets"``.
        verbose: if false, mute messages from torch Hub load function.
        channel_wise: if True, the loss is returned per channel. Otherwise the loss is averaged over the channels.
            Defaults to ``False``.
    """

    def __init__(
        self, net: str = "medicalnet_resnet10_23datasets", verbose: bool = False, channel_wise: bool = False
    ) -> None:
        super().__init__()
        # torch Hub rejects forked repositories by default; bypass the check for this known mirror.
        torch.hub._validate_not_a_forked_repo = lambda a, b, c: True
        self.model = torch.hub.load("warvito/MedicalNet-models", model=net, verbose=verbose)
        self.eval()

        self.channel_wise = channel_wise

        # The backbone is used as a frozen feature extractor.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Compute perceptual loss using MedicalNet 3D networks. Each channel is passed separately through the
        (single-channel) pretrained network, the extracted features are normalised across channels, and the
        squared feature difference is averaged over the spatial dimensions.

        Args:
            input: 3D input tensor with shape BCDHW.
            target: 3D target tensor with shape BCDHW.

        """
        input = medicalnet_intensity_normalisation(input)
        target = medicalnet_intensity_normalisation(target)

        # Get model outputs: run each channel independently and concatenate the features.
        feats_per_ch = 0
        for ch_idx in range(input.shape[1]):
            input_channel = input[:, ch_idx, ...].unsqueeze(1)
            target_channel = target[:, ch_idx, ...].unsqueeze(1)

            if ch_idx == 0:
                outs_input = self.model.forward(input_channel)
                outs_target = self.model.forward(target_channel)
                feats_per_ch = outs_input.shape[1]
            else:
                outs_input = torch.cat([outs_input, self.model.forward(input_channel)], dim=1)
                outs_target = torch.cat([outs_target, self.model.forward(target_channel)], dim=1)

        # Normalise through the channels
        feats_input = normalize_tensor(outs_input)
        feats_target = normalize_tensor(outs_target)

        feats_diff: torch.Tensor = (feats_input - feats_target) ** 2
        if self.channel_wise:
            # Allocate on the feature device to avoid a CPU/GPU mismatch (was created on CPU).
            results = torch.zeros(
                feats_diff.shape[0],
                input.shape[1],
                feats_diff.shape[2],
                feats_diff.shape[3],
                feats_diff.shape[4],
                device=feats_diff.device,
            )
            for i in range(input.shape[1]):
                l_idx = i * feats_per_ch
                r_idx = (i + 1) * feats_per_ch
                # Bug fix: sum exactly this channel's feature span. The previous slice
                # ``l_idx : i + r_idx`` also included features belonging to later channels.
                results[:, i, ...] = feats_diff[:, l_idx:r_idx, ...].sum(dim=1)
        else:
            results = feats_diff.sum(dim=1, keepdim=True)

        results = spatial_average_3d(results, keepdim=True)

        return results
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def spatial_average_3d(x: torch.Tensor, keepdim: bool = True) -> torch.Tensor:
    """Average a 5D (B, C, D, H, W) tensor over its three spatial dimensions (dims 2, 3, 4)."""
    return torch.mean(x, dim=[2, 3, 4], keepdim=keepdim)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def normalize_tensor(x: torch.Tensor, eps: float = 1e-10) -> torch.Tensor:
    """Scale ``x`` to (approximately) unit L2 norm along the channel dimension (dim 1).

    ``eps`` guards against division by zero when a channel vector is all zeros.
    """
    channel_norm = torch.sum(x**2, dim=1, keepdim=True).sqrt()
    return x / (channel_norm + eps)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
def medicalnet_intensity_normalisation(volume):
    """Z-score normalise ``volume`` (zero mean, unit std computed over all elements).

    Based on https://github.com/Tencent/MedicalNet/blob/18c8bb6cd564eb1b964bffef1f4c2283f1ae6e7b/datasets/brains18.py#L133
    """
    return (volume - volume.mean()) / volume.std()
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
class RadImageNetPerceptualSimilarity(nn.Module):
    """
    Perceptual-similarity backbone pretrained on RadImageNet (Mei, et al. "RadImageNet: An Open Radiologic Deep
    Learning Research Dataset for Effective Transfer Learning"). Weights are fetched via torch Hub from
    "Warvito/radimagenet-models".

    Args:
        net: {``"radimagenet_resnet50"``}
            Specifies the network architecture to use. Defaults to ``"radimagenet_resnet50"``.
        verbose: if false, mute messages from torch Hub load function.
    """

    def __init__(self, net: str = "radimagenet_resnet50", verbose: bool = False) -> None:
        super().__init__()
        self.model = torch.hub.load("Warvito/radimagenet-models", model=net, verbose=verbose)
        self.eval()

        # The backbone is a frozen feature extractor.
        for weight in self.parameters():
            weight.requires_grad = False

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Inputs are expected in [0, 1]. Mirroring the RadImageNet training pipeline
        (https://github.com/BMEII-AI/RadImageNet): single-channel images are tiled to 3 channels,
        channels are reordered from 'RGB' to 'BGR', the per-channel training means are subtracted,
        and the normalised features are compared and averaged over the spatial dimensions
        (similar approach to the lpips package).
        """
        # Single-channel images are tiled to the 3 channels the backbone expects.
        if input.shape[1] == 1 and target.shape[1] == 1:
            input = input.repeat(1, 3, 1, 1)
            target = target.repeat(1, 3, 1, 1)

        # RadImageNet was trained on 'BGR' inputs with the channel means removed.
        input = subtract_mean(input[:, [2, 1, 0], ...])
        target = subtract_mean(target[:, [2, 1, 0], ...])

        # Extract features and normalise them across the channel dimension.
        feats_input = normalize_tensor(self.model.forward(input))
        feats_target = normalize_tensor(self.model.forward(target))

        results: torch.Tensor = (feats_input - feats_target) ** 2
        return spatial_average(results.sum(dim=1, keepdim=True), keepdim=True)
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
class TorchvisionModelPerceptualSimilarity(nn.Module):
    """
    Component to perform the perceptual evaluation with TorchVision models.
    Currently, only ResNet50 is supported. The network structure is based on:
    https://pytorch.org/vision/main/models/generated/torchvision.models.resnet50.html

    Args:
        net: {``"resnet50"``}
            Specifies the network architecture to use. Defaults to ``"resnet50"``.
        pretrained: whether to load pretrained weights. Defaults to `True`.
        pretrained_path: if `pretrained` is `True`, users can specify a weights file to be loaded
            via using this argument. Defaults to `None`.
        pretrained_state_dict_key: if `pretrained_path` is not `None`, this argument is used to
            extract the expected state dict. Defaults to `None`.
    """

    def __init__(
        self,
        net: str = "resnet50",
        pretrained: bool = True,
        pretrained_path: str | None = None,
        pretrained_state_dict_key: str | None = None,
    ) -> None:
        super().__init__()
        supported_networks = ["resnet50"]
        if net not in supported_networks:
            raise NotImplementedError(
                f"'net' {net} is not supported, please select a network from {supported_networks}."
            )

        if pretrained_path is None:
            # Standard path: optionally download the default torchvision weights.
            weights = torchvision.models.ResNet50_Weights.DEFAULT if pretrained else None
            network = torchvision.models.resnet50(weights=weights)
        else:
            # Custom checkpoint: start from a bare network and load the user's state dict.
            network = torchvision.models.resnet50(weights=None)
            if pretrained is True:
                state_dict = torch.load(pretrained_path)
                if pretrained_state_dict_key is not None:
                    state_dict = state_dict[pretrained_state_dict_key]
                network.load_state_dict(state_dict)

        # Features are read from the last ReLU of layer4.
        self.final_layer = "layer4.2.relu_2"
        self.model = torchvision.models.feature_extraction.create_feature_extractor(network, [self.final_layer])
        self.eval()

        # The backbone is a frozen feature extractor.
        for weight in self.parameters():
            weight.requires_grad = False

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Inputs are expected in [0, 1]. Following the preprocessing documented at
        https://pytorch.org/vision/main/models/generated/torchvision.models.resnet50.html#torchvision.models.ResNet50_Weights,
        single-channel images are tiled to 3 channels and Z-score normalised; the extracted
        features are normalised across channels and averaged over the spatial dimensions
        (similar approach to the lpips package).
        """
        # Single-channel images are tiled to the 3 channels the backbone expects.
        if input.shape[1] == 1 and target.shape[1] == 1:
            input = input.repeat(1, 3, 1, 1)
            target = target.repeat(1, 3, 1, 1)

        # ImageNet-statistics normalisation expected by torchvision ResNet50.
        input = torchvision_zscore_norm(input)
        target = torchvision_zscore_norm(target)

        # Extract features and normalise them across the channel dimension.
        feats_input = normalize_tensor(self.model.forward(input)[self.final_layer])
        feats_target = normalize_tensor(self.model.forward(target)[self.final_layer])

        results: torch.Tensor = (feats_input - feats_target) ** 2
        return spatial_average(results.sum(dim=1, keepdim=True), keepdim=True)
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
def spatial_average(x: torch.Tensor, keepdim: bool = True) -> torch.Tensor:
    """Average a 4D (B, C, H, W) tensor over both spatial dimensions (dims 2 and 3)."""
    return torch.mean(x, dim=[2, 3], keepdim=keepdim)
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
def torchvision_zscore_norm(x: torch.Tensor) -> torch.Tensor:
    """Channel-wise z-score normalisation with the ImageNet statistics used by torchvision models.

    Bug fix: the previous implementation normalised in place, silently mutating the caller's
    tensor (3-channel inputs reach this function without an intervening copy). This version
    returns a new tensor and leaves ``x`` untouched; the returned values are unchanged.

    Args:
        x: image batch of shape (B, 3, H, W).
    """
    mean = torch.as_tensor([0.485, 0.456, 0.406], dtype=x.dtype, device=x.device).view(1, 3, 1, 1)
    std = torch.as_tensor([0.229, 0.224, 0.225], dtype=x.dtype, device=x.device).view(1, 3, 1, 1)
    return (x - mean) / std
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def subtract_mean(x: torch.Tensor) -> torch.Tensor:
    """Subtract the per-channel RadImageNet training means from a 'BGR'-ordered image batch.

    Bug fix: the previous implementation subtracted in place, mutating the caller's tensor.
    This version returns a new tensor and leaves ``x`` untouched; the returned values are
    unchanged.

    Args:
        x: image batch of shape (B, 3, H, W) in 'BGR' channel order.
    """
    mean = torch.as_tensor([0.406, 0.456, 0.485], dtype=x.dtype, device=x.device).view(1, 3, 1, 1)
    return x - mean
|
source_code/SegMamba/monai/losses/spatial_mask.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import inspect
|
| 15 |
+
import warnings
|
| 16 |
+
from collections.abc import Callable
|
| 17 |
+
from typing import Any, Optional
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
from torch.nn.modules.loss import _Loss
|
| 21 |
+
|
| 22 |
+
__all__ = ["MaskedLoss"]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class MaskedLoss(_Loss):
    """
    Wrapper that applies a weighting mask to both prediction and target before delegating to
    an inner loss function.

    See Also:
        - :py:class:`monai.losses.MaskedDiceLoss`
    """

    def __init__(
        self, loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | _Loss, *loss_args: Any, **loss_kwargs: Any
    ) -> None:
        """
        Args:
            loss: loss function to be wrapped, this could be a loss class or an instance of a loss class.
            loss_args: arguments to the loss function's constructor if `loss` is a class.
            loss_kwargs: keyword arguments to the loss function's constructor if `loss` is a class.
        """
        super().__init__()
        # Accept either a loss class (instantiate it here) or a ready-made callable.
        if inspect.isclass(loss):
            self.loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = loss(*loss_args, **loss_kwargs)
        else:
            self.loss = loss
        if not callable(self.loss):
            raise ValueError("The loss function is not callable.")

    def forward(self, input: torch.Tensor, target: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD].
            target: the shape should be BNH[WD].
            mask: the shape should be B1H[WD] or 11H[WD].
        """
        # Without a mask this degenerates to the wrapped loss (warn, since that is rarely intended).
        if mask is None:
            warnings.warn("No mask value specified for the MaskedLoss.")
            return self.loss(input, target)

        if input.dim() != mask.dim():
            warnings.warn(f"Dim of input ({input.shape}) is different from mask ({mask.shape}).")
        if input.shape[0] != mask.shape[0] and mask.shape[0] != 1:
            raise ValueError(f"Batch size of mask ({mask.shape}) must be one or equal to input ({input.shape}).")
        if target.dim() > 1:
            if mask.shape[1] != 1:
                raise ValueError(f"Mask ({mask.shape}) must have only one channel.")
            if input.shape[2:] != mask.shape[2:]:
                warnings.warn(f"Spatial size of input ({input.shape}) is different from mask ({mask.shape}).")

        # Mask both operands so masked-out voxels contribute nothing to the inner loss.
        return self.loss(input * mask, target * mask)
|
source_code/SegMamba/monai/losses/spectral_loss.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn.functional as F
|
| 16 |
+
from torch.fft import fftn
|
| 17 |
+
from torch.nn.modules.loss import _Loss
|
| 18 |
+
|
| 19 |
+
from monai.utils import LossReduction
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class JukeboxLoss(_Loss):
    """
    Calculate spectral component based on the magnitude of Fast Fourier Transform (FFT).

    Based on:
        Dhariwal, et al. 'Jukebox: A generative model for music.' https://arxiv.org/abs/2005.00341

    Args:
        spatial_dims: number of spatial dimensions.
        fft_signal_size: signal size in the transformed dimensions. See torch.fft.fftn() for more information.
        fft_norm: {``"forward"``, ``"backward"``, ``"ortho"``} Specifies the normalization mode in the fft. See
            torch.fft.fftn() for more information.

        reduction: {``"none"``, ``"mean"``, ``"sum"``}
            Specifies the reduction to apply to the output. Defaults to ``"mean"``.

            - ``"none"``: no reduction will be applied.
            - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
            - ``"sum"``: the output will be summed.
    """

    def __init__(
        self,
        spatial_dims: int,
        fft_signal_size: tuple[int] | None = None,
        fft_norm: str = "ortho",
        reduction: LossReduction | str = LossReduction.MEAN,
    ) -> None:
        super().__init__(reduction=LossReduction(reduction).value)

        self.spatial_dims = spatial_dims
        self.fft_signal_size = fft_signal_size
        # Transform the channel dim together with every spatial dim: dims 1 .. spatial_dims+1.
        self.fft_dim = tuple(range(1, spatial_dims + 2))
        self.fft_norm = fft_norm

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: predicted batch, the shape should be BNHW[D].
            target: target batch with the same shape as ``input``.
        """
        # Bug fix: these two variables were previously computed from the *other* tensor
        # (input amplitude from target and vice versa). Harmless for the symmetric MSE
        # below, but misleading; the names now match their sources.
        input_amplitude = self._get_fft_amplitude(input)
        target_amplitude = self._get_fft_amplitude(target)

        # Compute distance between amplitude of frequency components
        # See Section 3.3 from https://arxiv.org/abs/2005.00341
        loss = F.mse_loss(target_amplitude, input_amplitude, reduction="none")

        if self.reduction == LossReduction.MEAN.value:
            loss = loss.mean()
        elif self.reduction == LossReduction.SUM.value:
            loss = loss.sum()

        return loss

    def _get_fft_amplitude(self, images: torch.Tensor) -> torch.Tensor:
        """
        Calculate the amplitude of the Fourier representation of ``images``.

        Args:
            images: Images that are to undergo fftn

        Returns:
            fourier transformation amplitude
        """
        img_fft = fftn(images, s=self.fft_signal_size, dim=self.fft_dim, norm=self.fft_norm)

        # |z| = sqrt(Re(z)^2 + Im(z)^2)
        amplitude = torch.sqrt(torch.real(img_fft) ** 2 + torch.imag(img_fft) ** 2)

        return amplitude
|
source_code/SegMamba/monai/losses/ssim_loss.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Sequence
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
from torch.nn.modules.loss import _Loss
|
| 18 |
+
|
| 19 |
+
from monai.metrics.regression import KernelType, SSIMMetric
|
| 20 |
+
from monai.utils import LossReduction, ensure_tuple_rep
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class SSIMLoss(_Loss):
    """
    Loss based on the Structural Similarity Index Measure (SSIM): ``1 - SSIM(input, target)``.

    For more info, visit
        https://vicuesoft.com/glossary/term/ssim-ms-ssim/

    SSIM reference paper:
        Wang, Zhou, et al. "Image quality assessment: from error visibility to structural
        similarity." IEEE transactions on image processing 13.4 (2004): 600-612.
    """

    def __init__(
        self,
        spatial_dims: int,
        data_range: float = 1.0,
        kernel_type: KernelType | str = KernelType.GAUSSIAN,
        win_size: int | Sequence[int] = 11,
        kernel_sigma: float | Sequence[float] = 1.5,
        k1: float = 0.01,
        k2: float = 0.03,
        reduction: LossReduction | str = LossReduction.MEAN,
    ):
        """
        Args:
            spatial_dims: number of spatial dimensions of the input images.
            data_range: value range of input images. (usually 1.0 or 255)
            kernel_type: type of kernel, can be "gaussian" or "uniform".
            win_size: window size of kernel; a scalar is broadcast to every spatial dimension.
            kernel_sigma: standard deviation for Gaussian kernel; a scalar is broadcast likewise.
            k1: stability constant used in the luminance denominator
            k2: stability constant used in the contrast denominator
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.
                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.

        """
        super().__init__(reduction=LossReduction(reduction).value)
        self.spatial_dims = spatial_dims
        self._data_range = data_range
        self.kernel_type = kernel_type

        # Broadcast scalar kernel settings to one value per spatial dimension.
        self.kernel_size = win_size if isinstance(win_size, Sequence) else ensure_tuple_rep(win_size, spatial_dims)
        self.kernel_sigma = (
            kernel_sigma if isinstance(kernel_sigma, Sequence) else ensure_tuple_rep(kernel_sigma, spatial_dims)
        )

        self.k1 = k1
        self.k2 = k2

        # The heavy lifting is delegated to the SSIM metric implementation.
        self.ssim_metric = SSIMMetric(
            spatial_dims=self.spatial_dims,
            data_range=self._data_range,
            kernel_type=self.kernel_type,
            win_size=self.kernel_size,
            kernel_sigma=self.kernel_sigma,
            k1=self.k1,
            k2=self.k2,
        )

    @property
    def data_range(self) -> float:
        return self._data_range

    @data_range.setter
    def data_range(self, value: float) -> None:
        # Keep the wrapped metric in sync when the range is changed after construction.
        self._data_range = value
        self.ssim_metric.data_range = value

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: batch of predicted images with shape (batch_size, channels, spatial_dim1, spatial_dim2[, spatial_dim3])
            target: batch of target images with shape (batch_size, channels, spatial_dim1, spatial_dim2[, spatial_dim3])

        Returns:
            1 minus the ssim index (recall this is meant to be a loss function)

        Example:
            .. code-block:: python

                import torch

                # 2D data
                x = torch.ones([1,1,10,10])/2
                y = torch.ones([1,1,10,10])/2
                print(1-SSIMLoss(spatial_dims=2)(x,y))

                # 3D data
                x = torch.ones([1,1,10,10,10])/2
                y = torch.ones([1,1,10,10,10])/2
                print(1-SSIMLoss(spatial_dims=3)(x,y))
        """
        per_item_ssim = self.ssim_metric._compute_tensor(input, target).view(-1, 1)
        loss: torch.Tensor = 1 - per_item_ssim

        if self.reduction == LossReduction.MEAN.value:
            return torch.mean(loss)  # the batch average
        if self.reduction == LossReduction.SUM.value:
            return torch.sum(loss)  # sum over the batch
        return loss
|
source_code/SegMamba/monai/losses/sure_loss.py
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from typing import Callable, Optional
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import torch.nn as nn
|
| 18 |
+
from torch.nn.modules.loss import _Loss
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def complex_diff_abs_loss(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Compute the difference in the complex domain, then take the MSE of its magnitude.

    Args:
        x, y: (B, 2, H, W) real-valued tensors representing complex numbers,
            or (B, 1, H, W) complex-valued tensors.

    Returns:
        Scalar L2 loss of the complex-difference magnitudes.
    """

    def _to_complex(t: torch.Tensor) -> torch.Tensor:
        # (B, 2, H, W) real -> (B, H, W) complex; already-complex input passes through
        if t.is_complex():
            return t
        return torch.view_as_complex(t.permute(0, 2, 3, 1).contiguous())

    magnitude = torch.abs(_to_complex(x) - _to_complex(y))
    return nn.functional.mse_loss(magnitude, torch.zeros_like(magnitude), reduction="mean")
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def sure_loss_function(
    operator: Callable,
    x: torch.Tensor,
    y_pseudo_gt: torch.Tensor,
    y_ref: Optional[torch.Tensor] = None,
    eps: Optional[float] = -1.0,
    perturb_noise: Optional[torch.Tensor] = None,
    complex_input: Optional[bool] = False,
) -> torch.Tensor:
    """
    Compute the SURE loss for ``operator`` at input ``x``.

    Args:
        operator: function mapping an input tensor to an output tensor; it is
            evaluated at ``x`` and at a perturbed ``x`` to estimate the divergence.
        x: input tensor of shape (B, C, H, W); (B, 2, H, W) for complex data
            stored as two real channels, (B, 1, H, W) for real data.
        y_pseudo_gt: pseudo ground truth of the same layout as ``x``, used for the L2 term.
        y_ref: optional reference output of ``operator(x)``; computed here when omitted.
        eps: perturbation scalar; pass -1 to derive it from ``y_pseudo_gt``
            (a thousandth of its peak magnitude).
        perturb_noise: optional noise tensor of the same layout as ``x``;
            sampled from a standard normal when omitted.
        complex_input: whether the tensors encode complex data. Defaults to False.

    Returns:
        The SURE loss scalar.
    """
    if perturb_noise is None:
        perturb_noise = torch.randn_like(x)
    if eps == -1.0:
        # heuristic perturbation scale tied to the pseudo-GT's peak magnitude
        eps = float(torch.abs(y_pseudo_gt.max())) / 1000
    if y_ref is None:
        y_ref = operator(x)

    # evaluate the operator at the perturbed input
    y_perturbed = operator(x + eps * perturb_noise)
    # Monte-Carlo estimate of the divergence via the noise/output inner product
    divergence = torch.sum(1.0 / eps * torch.matmul(perturb_noise.permute(0, 1, 3, 2), y_perturbed - y_ref))  # type: ignore
    # data-fidelity term between the reference output and the pseudo ground truth
    if complex_input:
        l2_loss = complex_diff_abs_loss(y_ref, y_pseudo_gt)
    else:
        l2_loss = nn.functional.mse_loss(y_ref, y_pseudo_gt, reduction="mean")

    # normalize by batch size and spatial extent (H * W)
    return l2_loss * divergence / (x.shape[0] * x.shape[2] * x.shape[3])
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class SURELoss(_Loss):
    """
    Stein's Unbiased Risk Estimator (SURE) loss for a given operator.

    A differentiable objective for training/guiding an operator (e.g. a neural
    network) when a pseudo ground truth exists but a reference ground truth does
    not. In MRI reconstruction, for instance, the zero-filled reconstruction can
    serve as the pseudo ground truth while the fully sampled reconstruction is
    often unavailable.

    The original SURE loss is proposed in [1]; its use for guiding diffusion-model
    based MRI reconstruction is proposed in [2].

    Reference

    [1] Stein, C.M.: Estimation of the mean of a multivariate normal distribution. Annals of Statistics

    [2] B. Ozturkler et al. SMRD: SURE-based Robust MRI Reconstruction with Diffusion Models.
    (https://arxiv.org/pdf/2310.01799.pdf)
    """

    def __init__(self, perturb_noise: Optional[torch.Tensor] = None, eps: Optional[float] = None) -> None:
        """
        Args:
            perturb_noise: optional noise tensor of shape (B, C, H, W); (B, 2, H, W)
                for complex data stored as two real channels, (B, 1, H, W) for real data.
            eps: optional perturbation scalar forwarded to the loss function.
        """
        super().__init__()
        self.perturb_noise = perturb_noise
        self.eps = eps

    @staticmethod
    def _validate(x: torch.Tensor, y_pseudo_gt: torch.Tensor, y_ref: Optional[torch.Tensor]) -> None:
        """Raise ``ValueError`` on rank/shape mismatches; checks run in a fixed order."""
        if x.dim() != 4:
            raise ValueError(f"Input tensor x should be 4D, got {x.dim()}.")
        if y_pseudo_gt.dim() != 4:
            raise ValueError(f"Input tensor y_pseudo_gt should be 4D, but got {y_pseudo_gt.dim()}.")
        if y_ref is not None and y_ref.dim() != 4:
            raise ValueError(f"Input tensor y_ref should be 4D, but got {y_ref.dim()}.")
        if x.shape != y_pseudo_gt.shape:
            raise ValueError(
                f"Input tensor x and y_pseudo_gt should have the same shape, but got x shape {x.shape}, "
                f"y_pseudo_gt shape {y_pseudo_gt.shape}."
            )
        if y_ref is not None and y_pseudo_gt.shape != y_ref.shape:
            raise ValueError(
                f"Input tensor y_pseudo_gt and y_ref should have the same shape, but got y_pseudo_gt shape {y_pseudo_gt.shape}, "
                f"y_ref shape {y_ref.shape}."
            )

    def forward(
        self,
        operator: Callable,
        x: torch.Tensor,
        y_pseudo_gt: torch.Tensor,
        y_ref: Optional[torch.Tensor] = None,
        complex_input: Optional[bool] = False,
    ) -> torch.Tensor:
        """
        Args:
            operator: function mapping an input tensor to an output tensor; used to
                estimate the divergence by comparing a perturbed output with the
                reference output.
            x: input tensor of shape (B, C, H, W); C=2 for complex data stored as
                two real channels, C=1 for real data.
            y_pseudo_gt: pseudo ground truth tensor of the same shape as ``x``,
                used for the L2 term.
            y_ref: optional reference output tensor of the same shape as ``y_pseudo_gt``.
            complex_input: whether the tensors encode complex data. Defaults to False.

        Returns:
            The SURE loss scalar.
        """
        self._validate(x, y_pseudo_gt, y_ref)
        return sure_loss_function(operator, x, y_pseudo_gt, y_ref, self.eps, self.perturb_noise, complex_input)
|
source_code/SegMamba/monai/metrics/confusion_matrix.py
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import warnings
|
| 15 |
+
from collections.abc import Sequence
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
|
| 19 |
+
from monai.metrics.utils import do_metric_reduction, ignore_background
|
| 20 |
+
from monai.utils import MetricReduction, ensure_tuple
|
| 21 |
+
|
| 22 |
+
from .metric import CumulativeIterationMetric
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class ConfusionMatrixMetric(CumulativeIterationMetric):
    """
    Compute metrics derived from the confusion matrix, supporting everything listed in
    `Confusion matrix <https://en.wikipedia.org/wiki/Confusion_matrix>`_.
    Both multi-class and multi-label classification/segmentation tasks are handled.
    `y_preds` must contain binarized predictions and `y` must be one-hot encoded; suitable
    transforms from ``monai.transforms.post`` can be applied first to achieve this.
    Setting ``include_background=False`` excludes the first category (channel index 0),
    conventionally assumed to be background — useful when small foreground segmentations
    would otherwise be overwhelmed by the background signal.

    Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        include_background: whether to include metric computation on the first channel of
            the predicted output. Defaults to True.
        metric_name: [``"sensitivity"``, ``"specificity"``, ``"precision"``, ``"negative predictive value"``,
            ``"miss rate"``, ``"fall out"``, ``"false discovery rate"``, ``"false omission rate"``,
            ``"prevalence threshold"``, ``"threat score"``, ``"accuracy"``, ``"balanced accuracy"``,
            ``"f1 score"``, ``"matthews correlation coefficient"``, ``"fowlkes mallows index"``,
            ``"informedness"``, ``"markedness"``]
            The aliases listed on the wikipedia page above are also accepted.
            A sequence of names such as ("sensitivity", "precision", "recall") may be given;
            with ``compute_sample=True``, multiple ``f`` and ``not_nans`` are returned in the
            same order as the input names when calling the class.
        compute_sample: when reducing, if ``True``, each sample's metric is computed from its own
            confusion matrix first; if ``False``, the confusion matrices are reduced first.
            Defaults to ``False``.
        reduction: reduction mode applied to the metrics over `not-nan` values only; one of
            {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default ``"mean"``. "none" skips reduction.
        get_not_nans: whether to return the `not_nans` count; if True, aggregate() returns
            [(metric, not_nans), ...], otherwise [metric, ...].
            Here `not_nans` counts the not-nan entries for TP, FP, TN and FN, so it carries one
            extra trailing dimension of size 4 (e.g. metric shape [3, 3] -> not_nans shape [3, 3, 4]).
    """

    def __init__(
        self,
        include_background: bool = True,
        metric_name: Sequence[str] | str = "hit_rate",
        compute_sample: bool = False,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__()
        self.include_background = include_background
        self.metric_name = ensure_tuple(metric_name)
        self.compute_sample = compute_sample
        self.reduction = reduction
        self.get_not_nans = get_not_nans

    def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:  # type: ignore[override]
        """
        Accumulate the raw confusion-matrix entries for one iteration.

        Args:
            y_pred: binarized prediction in one-hot layout, batch first.
            y: binarized ground truth in one-hot layout, batch first.
        Raises:
            ValueError: when `y_pred` has less than two dimensions.
        """
        ndim = y_pred.ndimension()
        if ndim < 2:
            raise ValueError("y_pred should have at least two dimensions.")
        # a [B, C] (or [B, C, 1]) input is a classification result; per-sample
        # confusion matrices are meaningless there, so force compute_sample off
        if ndim == 2 or (ndim == 3 and y_pred.shape[-1] == 1):
            if self.compute_sample:
                warnings.warn("As for classification task, compute_sample should be False.")
                self.compute_sample = False

        return get_confusion_matrix(y_pred=y_pred, y=y, include_background=self.include_background)

    def aggregate(
        self, compute_sample: bool = False, reduction: MetricReduction | str | None = None
    ) -> list[torch.Tensor | tuple[torch.Tensor, torch.Tensor]]:
        """
        Reduce the accumulated confusion matrices into the requested metric values.

        Args:
            compute_sample: when reducing, if ``True``, each sample's metric is computed from its
                own confusion matrix first; if ``False``, the matrices are reduced first.
                Defaults to ``False``.
            reduction: reduction mode applied over `not-nan` values only; one of
                {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default `self.reduction`.
                "none" skips reduction.
        """
        data = self.get_buffer()
        if not isinstance(data, torch.Tensor):
            raise ValueError("the data to aggregate must be PyTorch Tensor.")

        mode = reduction or self.reduction
        per_sample = compute_sample or self.compute_sample
        outputs: list[torch.Tensor | tuple[torch.Tensor, torch.Tensor]] = []
        for name in self.metric_name:
            if per_sample:
                # metric per confusion matrix, then reduce the metric values
                score, not_nans = do_metric_reduction(compute_confusion_matrix_metric(name, data), mode)
            else:
                # reduce the confusion matrices first, then derive the metric
                reduced, not_nans = do_metric_reduction(data, mode)
                score = compute_confusion_matrix_metric(name, reduced)
            outputs.append((score, not_nans) if self.get_not_nans else score)
        return outputs
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def get_confusion_matrix(y_pred: torch.Tensor, y: torch.Tensor, include_background: bool = True) -> torch.Tensor:
    """
    Compute the confusion matrix, returning a tensor of shape [BC4]. The last dimension
    holds the true positive, false positive, true negative and false negative counts for
    each channel of each sample; B is the batch size and C the number of classes computed.

    Args:
        y_pred: input data to compute. It must be one-hot format and first dim is batch.
            The values should be binarized.
        y: ground truth to compute the metric. It must be one-hot format and first dim is batch.
            The values should be binarized.
        include_background: whether to include metric computation on the first channel of
            the predicted output. Defaults to True.

    Raises:
        ValueError: when `y_pred` and `y` have different shapes.
    """
    if not include_background:
        y_pred, y = ignore_background(y_pred=y_pred, y=y)

    if y.shape != y_pred.shape:
        raise ValueError(f"y_pred and y should have same shapes, got {y_pred.shape} and {y.shape}.")

    batch, n_classes = y_pred.shape[:2]
    # flatten spatial dims to [B, N, S]; for classification tasks S is 1
    pred_flat = y_pred.reshape(batch, n_classes, -1)
    gt_flat = y.reshape(batch, n_classes, -1)

    # binarized inputs: sum == 2 exactly where both agree positive, 0 where both negative
    combined = pred_flat + gt_flat
    tp = (combined == 2).sum(dim=[2]).float()
    tn = (combined == 0).sum(dim=[2]).float()
    positives = gt_flat.sum(dim=[2]).float()
    negatives = gt_flat.shape[-1] - positives

    fn = positives - tp
    fp = negatives - tn

    return torch.stack([tp, fp, tn, fn], dim=-1)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def compute_confusion_matrix_metric(metric_name: str, confusion_matrix: torch.Tensor) -> torch.Tensor:
    """
    Derive a confusion-matrix based metric from raw [TP, FP, TN, FN] entries.

    Args:
        metric_name: [``"sensitivity"``, ``"specificity"``, ``"precision"``, ``"negative predictive value"``,
            ``"miss rate"``, ``"fall out"``, ``"false discovery rate"``, ``"false omission rate"``,
            ``"prevalence threshold"``, ``"threat score"``, ``"accuracy"``, ``"balanced accuracy"``,
            ``"f1 score"``, ``"matthews correlation coefficient"``, ``"fowlkes mallows index"``,
            ``"informedness"``, ``"markedness"``]
            The aliases listed on the wikipedia page referenced by ``ConfusionMatrixMetric``
            are accepted as well.
        confusion_matrix: tensor whose last dimension holds [TP, FP, TN, FN];
            see ``get_confusion_matrix`` for details.

    Raises:
        ValueError: when the size of the last dimension of confusion_matrix is not 4.
        NotImplementedError: when specify a not implemented metric_name.
    """
    metric = check_confusion_matrix_metric_name(metric_name)

    if confusion_matrix.ndimension() == 1:
        confusion_matrix = confusion_matrix.unsqueeze(dim=0)
    if confusion_matrix.shape[-1] != 4:
        raise ValueError("the size of the last dimension of confusion_matrix should be 4.")

    tp, fp, tn, fn = (confusion_matrix[..., i] for i in range(4))
    p = tp + fn  # actual positives
    n = fp + tn  # actual negatives
    nan_tensor = torch.tensor(float("nan"), device=confusion_matrix.device)

    def _rate(num: torch.Tensor, den: torch.Tensor) -> torch.Tensor:
        # intermediate ratio for compound metrics; nan where the class is absent
        return torch.where(den > 0, num / den, nan_tensor)

    numerator: torch.Tensor
    denominator: torch.Tensor | float
    # metrics expressible directly as a numerator / denominator pair
    ratio_forms = {
        "tpr": (tp, p),
        "tnr": (tn, n),
        "ppv": (tp, tp + fp),
        "npv": (tn, tn + fn),
        "fnr": (fn, p),
        "fpr": (fp, n),
        "fdr": (fp, fp + tp),
        "for": (fn, fn + tn),
        "ts": (tp, tp + fn + fp),
        "acc": (tp + tn, p + n),
        "f1": (tp * 2.0, tp * 2.0 + fn + fp),
        "mcc": (tp * tn - fp * fn, torch.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))),
    }
    if metric in ratio_forms:
        numerator, denominator = ratio_forms[metric]
    elif metric == "pt":
        tpr, tnr = _rate(tp, p), _rate(tn, n)
        numerator = torch.sqrt(tpr * (1.0 - tnr)) + tnr - 1.0
        denominator = tpr + tnr - 1.0
    elif metric == "ba":
        numerator, denominator = _rate(tp, p) + _rate(tn, n), 2.0
    elif metric == "fm":
        numerator, denominator = torch.sqrt(_rate(tp, tp + fp) * _rate(tp, p)), 1.0
    elif metric == "bm":
        numerator, denominator = _rate(tp, p) + _rate(tn, n) - 1.0, 1.0
    elif metric == "mk":
        numerator, denominator = _rate(tp, tp + fp) + _rate(tn, tn + fn) - 1.0, 1.0
    else:
        raise NotImplementedError("the metric is not implemented.")

    if isinstance(denominator, torch.Tensor):
        return torch.where(denominator != 0, numerator / denominator, nan_tensor)
    return numerator / denominator
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def check_confusion_matrix_metric_name(metric_name: str) -> str:
    """
    Normalize a confusion-matrix metric name. Many metrics have several (often long)
    aliases; this maps any accepted spelling — case-insensitive, spaces or
    underscores — onto its canonical short form.

    Returns:
        Simplified metric name.

    Raises:
        NotImplementedError: when the metric is not implemented.
    """
    # canonical short name -> every accepted alias (lower-case, underscore-separated)
    alias_table = {
        "tpr": {"sensitivity", "recall", "hit_rate", "true_positive_rate", "tpr"},
        "tnr": {"specificity", "selectivity", "true_negative_rate", "tnr"},
        "ppv": {"precision", "positive_predictive_value", "ppv"},
        "npv": {"negative_predictive_value", "npv"},
        "fnr": {"miss_rate", "false_negative_rate", "fnr"},
        "fpr": {"fall_out", "false_positive_rate", "fpr"},
        "fdr": {"false_discovery_rate", "fdr"},
        "for": {"false_omission_rate", "for"},
        "pt": {"prevalence_threshold", "pt"},
        "ts": {"threat_score", "critical_success_index", "ts", "csi"},
        "acc": {"accuracy", "acc"},
        "ba": {"balanced_accuracy", "ba"},
        "f1": {"f1_score", "f1"},
        "mcc": {"matthews_correlation_coefficient", "mcc"},
        "fm": {"fowlkes_mallows_index", "fm"},
        "bm": {"informedness", "bookmaker_informedness", "bm", "youden_index", "youden"},
        "mk": {"markedness", "deltap", "mk"},
    }
    normalized = metric_name.replace(" ", "_").lower()
    for short_name, names in alias_table.items():
        if normalized in names:
            return short_name
    raise NotImplementedError("the metric is not implemented.")
|
source_code/SegMamba/monai/metrics/cumulative_average.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import warnings
|
| 15 |
+
from typing import Any
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.distributed as dist
|
| 19 |
+
|
| 20 |
+
from monai.config import NdarrayOrTensor
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class CumulativeAverage:
|
| 24 |
+
"""
|
| 25 |
+
A utility class to keep track of average values. For example during training/validation loop,
|
| 26 |
+
we need to accumulate the per-batch metrics and calculate the final average value for the whole dataset.
|
| 27 |
+
When training in multi-gpu environment, with DistributedDataParallel, it will average across the processes.
|
| 28 |
+
|
| 29 |
+
Example:
|
| 30 |
+
|
| 31 |
+
.. code-block:: python
|
| 32 |
+
|
| 33 |
+
from monai.metrics import CumulativeAverage
|
| 34 |
+
|
| 35 |
+
run_avg = CumulativeAverage()
|
| 36 |
+
batch_size = 8
|
| 37 |
+
for i in range(len(train_set)):
|
| 38 |
+
...
|
| 39 |
+
val = calc_metric(x,y) #some metric value
|
| 40 |
+
run_avg.append(val, count=batch_size)
|
| 41 |
+
|
| 42 |
+
val_avg = run_avg.aggregate() #average value
|
| 43 |
+
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
def __init__(self) -> None:
|
| 47 |
+
self.reset()
|
| 48 |
+
|
| 49 |
+
def reset(self) -> None:
|
| 50 |
+
"""
|
| 51 |
+
Reset all stats
|
| 52 |
+
"""
|
| 53 |
+
self.val: torch.Tensor = None # type: ignore
|
| 54 |
+
self.sum = torch.tensor(0, dtype=torch.float)
|
| 55 |
+
self.count = torch.tensor(0, dtype=torch.float)
|
| 56 |
+
self.is_distributed = dist.is_available() and dist.is_initialized()
|
| 57 |
+
|
| 58 |
+
def get_current(self, to_numpy: bool = True) -> NdarrayOrTensor:
|
| 59 |
+
"""
|
| 60 |
+
returns the most recent value (averaged across processes)
|
| 61 |
+
|
| 62 |
+
Args:
|
| 63 |
+
to_numpy: whether to convert to numpy array. Defaults to True
|
| 64 |
+
"""
|
| 65 |
+
if self.val is None:
|
| 66 |
+
return 0
|
| 67 |
+
|
| 68 |
+
val = self.val.clone()
|
| 69 |
+
val[~torch.isfinite(val)] = 0
|
| 70 |
+
|
| 71 |
+
if self.is_distributed:
|
| 72 |
+
val = val / dist.get_world_size()
|
| 73 |
+
dist.all_reduce(val)
|
| 74 |
+
|
| 75 |
+
if to_numpy:
|
| 76 |
+
val = val.cpu().numpy()
|
| 77 |
+
|
| 78 |
+
return val
|
| 79 |
+
|
| 80 |
+
def aggregate(self, to_numpy: bool = True) -> NdarrayOrTensor:
|
| 81 |
+
"""
|
| 82 |
+
returns the total average value (averaged across processes)
|
| 83 |
+
|
| 84 |
+
Args:
|
| 85 |
+
to_numpy: whether to convert to numpy array. Defaults to True
|
| 86 |
+
"""
|
| 87 |
+
if self.val is None:
|
| 88 |
+
return 0
|
| 89 |
+
|
| 90 |
+
sum = self.sum
|
| 91 |
+
count = self.count
|
| 92 |
+
|
| 93 |
+
if self.is_distributed:
|
| 94 |
+
sum = sum.to(self.val, copy=True)
|
| 95 |
+
count = count.to(self.val, copy=True)
|
| 96 |
+
dist.all_reduce(sum)
|
| 97 |
+
dist.all_reduce(count)
|
| 98 |
+
|
| 99 |
+
val = torch.where(count > 0, sum / count, sum)
|
| 100 |
+
|
| 101 |
+
if to_numpy:
|
| 102 |
+
val = val.cpu().numpy()
|
| 103 |
+
return val
|
| 104 |
+
|
| 105 |
+
def append(self, val: Any, count: Any | None = 1) -> None:
|
| 106 |
+
"""
|
| 107 |
+
Append with a new value, and an optional count. Any data type is supported that is convertable
|
| 108 |
+
with torch.as_tensor() e.g. number, list, numpy array, or Tensor.
|
| 109 |
+
|
| 110 |
+
Args:
|
| 111 |
+
val: value (e.g. number, list, numpy array or Tensor) to keep track of
|
| 112 |
+
count: count (e.g. number, list, numpy array or Tensor), to update the contribution count
|
| 113 |
+
|
| 114 |
+
For example:
|
| 115 |
+
# a simple constant tracking
|
| 116 |
+
avg = CumulativeAverage()
|
| 117 |
+
avg.append(0.6)
|
| 118 |
+
avg.append(0.8)
|
| 119 |
+
print(avg.aggregate()) #prints 0.7
|
| 120 |
+
|
| 121 |
+
# an array tracking, e.g. metrics from 3 classes
|
| 122 |
+
avg= CumulativeAverage()
|
| 123 |
+
avg.append([0.2, 0.4, 0.4])
|
| 124 |
+
avg.append([0.4, 0.6, 0.4])
|
| 125 |
+
print(avg.aggregate()) #prints [0.3, 0.5. 0.4]
|
| 126 |
+
|
| 127 |
+
# different contributions / counts
|
| 128 |
+
avg= CumulativeAverage()
|
| 129 |
+
avg.append(1, count=4) #avg metric 1 coming from a batch of 4
|
| 130 |
+
avg.append(2, count=6) #avg metric 2 coming from a batch of 6
|
| 131 |
+
print(avg.aggregate()) #prints 1.6 == (1*4 +2*6)/(4+6)
|
| 132 |
+
|
| 133 |
+
# different contributions / counts
|
| 134 |
+
avg= CumulativeAverage()
|
| 135 |
+
avg.append([0.5, 0.5, 0], count=[1, 1, 0]) # last elements count is zero to ignore it
|
| 136 |
+
avg.append([0.5, 0.5, 0.5], count=[1, 1, 1]) #
|
| 137 |
+
print(avg.aggregate()) #prints [0.5, 0.5, 0,5] == ([0.5, 0.5, 0] + [0.5, 0.5, 0.5]) / ([1, 1, 0] + [1, 1, 1])
|
| 138 |
+
|
| 139 |
+
"""
|
| 140 |
+
self.val = torch.as_tensor(val, dtype=torch.float)
|
| 141 |
+
if self.val.requires_grad:
|
| 142 |
+
self.val = self.val.detach().clone()
|
| 143 |
+
|
| 144 |
+
count = torch.as_tensor(count, dtype=torch.float, device="cpu")
|
| 145 |
+
if count.ndim > 0 and count.shape != self.val.shape:
|
| 146 |
+
raise ValueError(
|
| 147 |
+
f"Count shape must match val shape, unless count is a single number: {count} val {self.val.cpu()}"
|
| 148 |
+
)
|
| 149 |
+
|
| 150 |
+
val = count * self.val.cpu()
|
| 151 |
+
|
| 152 |
+
# account for possible non-finite numbers in val and replace them with 0s
|
| 153 |
+
nfin = torch.isfinite(val)
|
| 154 |
+
if not torch.all(nfin):
|
| 155 |
+
warnings.warn(f"non-finite inputs received: val: {val}, count: {count}")
|
| 156 |
+
count = torch.where(nfin, count, torch.zeros_like(count))
|
| 157 |
+
val = torch.where(nfin, val, torch.zeros_like(val))
|
| 158 |
+
|
| 159 |
+
self.count = self.count + count
|
| 160 |
+
self.sum = self.sum + val
|
source_code/SegMamba/monai/metrics/f_beta_score.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Sequence
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
|
| 18 |
+
from monai.metrics.utils import do_metric_reduction, ignore_background
|
| 19 |
+
from monai.utils import MetricReduction
|
| 20 |
+
|
| 21 |
+
from .metric import CumulativeIterationMetric
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class FBetaScore(CumulativeIterationMetric):
|
| 25 |
+
|
| 26 |
+
def __init__(
|
| 27 |
+
self,
|
| 28 |
+
beta: float = 1.0,
|
| 29 |
+
include_background: bool = True,
|
| 30 |
+
reduction: MetricReduction | str = MetricReduction.MEAN,
|
| 31 |
+
get_not_nans: bool = False,
|
| 32 |
+
) -> None:
|
| 33 |
+
super().__init__()
|
| 34 |
+
self.beta = beta
|
| 35 |
+
self.include_background = include_background
|
| 36 |
+
self.reduction = reduction
|
| 37 |
+
self.get_not_nans = get_not_nans
|
| 38 |
+
|
| 39 |
+
def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: # type: ignore[override]
|
| 40 |
+
if y_pred.ndimension() < 2:
|
| 41 |
+
raise ValueError("y_pred should have at least two dimensions.")
|
| 42 |
+
|
| 43 |
+
return get_f_beta_score(y_pred=y_pred, y=y, include_background=self.include_background)
|
| 44 |
+
|
| 45 |
+
def aggregate(
|
| 46 |
+
self, compute_sample: bool = False, reduction: MetricReduction | str | None = None
|
| 47 |
+
) -> Sequence[torch.Tensor | tuple[torch.Tensor, torch.Tensor]]:
|
| 48 |
+
data = self.get_buffer()
|
| 49 |
+
if not isinstance(data, torch.Tensor):
|
| 50 |
+
raise ValueError("the data to aggregate must be PyTorch Tensor.")
|
| 51 |
+
|
| 52 |
+
results: list[torch.Tensor | tuple[torch.Tensor, torch.Tensor]] = []
|
| 53 |
+
f, not_nans = do_metric_reduction(data, reduction or self.reduction)
|
| 54 |
+
f = compute_f_beta_score(f, self.beta)
|
| 55 |
+
if self.get_not_nans:
|
| 56 |
+
results.append((f, not_nans))
|
| 57 |
+
else:
|
| 58 |
+
results.append(f)
|
| 59 |
+
|
| 60 |
+
return results
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def get_f_beta_score(y_pred: torch.Tensor, y: torch.Tensor, include_background: bool = True) -> torch.Tensor:
|
| 64 |
+
if not include_background:
|
| 65 |
+
y_pred, y = ignore_background(y_pred=y_pred, y=y)
|
| 66 |
+
|
| 67 |
+
if y.shape != y_pred.shape:
|
| 68 |
+
raise ValueError(f"y_pred and y should have same shapes, got {y_pred.shape} and {y.shape}.")
|
| 69 |
+
|
| 70 |
+
# get confusion matrix related metric
|
| 71 |
+
batch_size, n_class = y_pred.shape[:2]
|
| 72 |
+
# convert to [BNS], where S is the number of pixels for one sample.
|
| 73 |
+
# As for classification tasks, S equals to 1.
|
| 74 |
+
y_pred = y_pred.view(batch_size, n_class, -1)
|
| 75 |
+
y = y.view(batch_size, n_class, -1)
|
| 76 |
+
tp = (y_pred + y) == 2
|
| 77 |
+
tn = (y_pred + y) == 0
|
| 78 |
+
|
| 79 |
+
tp = tp.sum(dim=[2]).float()
|
| 80 |
+
tn = tn.sum(dim=[2]).float()
|
| 81 |
+
p = y.sum(dim=[2]).float()
|
| 82 |
+
n = y.shape[-1] - p
|
| 83 |
+
|
| 84 |
+
fn = p - tp
|
| 85 |
+
fp = n - tn
|
| 86 |
+
|
| 87 |
+
return torch.stack([tp, fp, tn, fn], dim=-1)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def compute_f_beta_score(confusion_matrix: torch.Tensor, beta: float) -> torch.Tensor:
|
| 91 |
+
input_dim = confusion_matrix.ndimension()
|
| 92 |
+
if input_dim == 1:
|
| 93 |
+
confusion_matrix = confusion_matrix.unsqueeze(dim=0)
|
| 94 |
+
if confusion_matrix.shape[-1] != 4:
|
| 95 |
+
raise ValueError("the size of the last dimension of confusion_matrix should be 4.")
|
| 96 |
+
|
| 97 |
+
tp = confusion_matrix[..., 0]
|
| 98 |
+
fp = confusion_matrix[..., 1]
|
| 99 |
+
# tn = confusion_matrix[..., 2]
|
| 100 |
+
fn = confusion_matrix[..., 3]
|
| 101 |
+
|
| 102 |
+
nan_tensor = torch.tensor(float("nan"), device=confusion_matrix.device)
|
| 103 |
+
numerator, denominator = (1.0 + beta**2) * tp, ((1.0 + beta**2) * tp + beta**2 * fn + fp)
|
| 104 |
+
|
| 105 |
+
if isinstance(denominator, torch.Tensor):
|
| 106 |
+
return torch.where(denominator != 0, numerator / denominator, nan_tensor)
|
| 107 |
+
return numerator / denominator
|
source_code/SegMamba/monai/metrics/fid.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
import torch
|
| 16 |
+
|
| 17 |
+
from monai.metrics.metric import Metric
|
| 18 |
+
from monai.utils import optional_import
|
| 19 |
+
|
| 20 |
+
scipy, _ = optional_import("scipy")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class FIDMetric(Metric):
|
| 24 |
+
"""
|
| 25 |
+
Frechet Inception Distance (FID). The FID calculates the distance between two distributions of feature vectors.
|
| 26 |
+
Based on: Heusel M. et al. "Gans trained by a two time-scale update rule converge to a local nash equilibrium."
|
| 27 |
+
https://arxiv.org/abs/1706.08500. The inputs for this metric should be two groups of feature vectors (with format
|
| 28 |
+
(number images, number of features)) extracted from a pretrained network.
|
| 29 |
+
|
| 30 |
+
Originally, it was proposed to use the activations of the pool_3 layer of an Inception v3 pretrained with Imagenet.
|
| 31 |
+
However, others networks pretrained on medical datasets can be used as well (for example, RadImageNwt for 2D and
|
| 32 |
+
MedicalNet for 3D images). If the chosen model output is not a scalar, a global spatia average pooling should be
|
| 33 |
+
used.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __call__(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
|
| 37 |
+
return get_fid_score(y_pred, y)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def get_fid_score(y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
|
| 41 |
+
"""Computes the FID score metric on a batch of feature vectors.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
y_pred: feature vectors extracted from a pretrained network run on generated images.
|
| 45 |
+
y: feature vectors extracted from a pretrained network run on images from the real data distribution.
|
| 46 |
+
"""
|
| 47 |
+
y = y.double()
|
| 48 |
+
y_pred = y_pred.double()
|
| 49 |
+
|
| 50 |
+
if y.ndimension() > 2:
|
| 51 |
+
raise ValueError("Inputs should have (number images, number of features) shape.")
|
| 52 |
+
|
| 53 |
+
mu_y_pred = torch.mean(y_pred, dim=0)
|
| 54 |
+
sigma_y_pred = _cov(y_pred, rowvar=False)
|
| 55 |
+
mu_y = torch.mean(y, dim=0)
|
| 56 |
+
sigma_y = _cov(y, rowvar=False)
|
| 57 |
+
|
| 58 |
+
return compute_frechet_distance(mu_y_pred, sigma_y_pred, mu_y, sigma_y)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _cov(input_data: torch.Tensor, rowvar: bool = True) -> torch.Tensor:
|
| 62 |
+
"""
|
| 63 |
+
Estimate a covariance matrix of the variables.
|
| 64 |
+
|
| 65 |
+
Args:
|
| 66 |
+
input_data: A 1-D or 2-D array containing multiple variables and observations. Each row of `m` represents a variable,
|
| 67 |
+
and each column a single observation of all those variables.
|
| 68 |
+
rowvar: If rowvar is True (default), then each row represents a variable, with observations in the columns.
|
| 69 |
+
Otherwise, the relationship is transposed: each column represents a variable, while the rows contain
|
| 70 |
+
observations.
|
| 71 |
+
"""
|
| 72 |
+
if input_data.dim() < 2:
|
| 73 |
+
input_data = input_data.view(1, -1)
|
| 74 |
+
|
| 75 |
+
if not rowvar and input_data.size(0) != 1:
|
| 76 |
+
input_data = input_data.t()
|
| 77 |
+
|
| 78 |
+
factor = 1.0 / (input_data.size(1) - 1)
|
| 79 |
+
input_data = input_data - torch.mean(input_data, dim=1, keepdim=True)
|
| 80 |
+
return factor * input_data.matmul(input_data.t()).squeeze()
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def _sqrtm(input_data: torch.Tensor) -> torch.Tensor:
|
| 84 |
+
"""Compute the square root of a matrix."""
|
| 85 |
+
scipy_res, _ = scipy.linalg.sqrtm(input_data.detach().cpu().numpy().astype(np.float_), disp=False)
|
| 86 |
+
return torch.from_numpy(scipy_res)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def compute_frechet_distance(
|
| 90 |
+
mu_x: torch.Tensor, sigma_x: torch.Tensor, mu_y: torch.Tensor, sigma_y: torch.Tensor, epsilon: float = 1e-6
|
| 91 |
+
) -> torch.Tensor:
|
| 92 |
+
"""The Frechet distance between multivariate normal distributions."""
|
| 93 |
+
diff = mu_x - mu_y
|
| 94 |
+
|
| 95 |
+
covmean = _sqrtm(sigma_x.mm(sigma_y))
|
| 96 |
+
|
| 97 |
+
# Product might be almost singular
|
| 98 |
+
if not torch.isfinite(covmean).all():
|
| 99 |
+
print(f"FID calculation produces singular product; adding {epsilon} to diagonal of covariance estimates")
|
| 100 |
+
offset = torch.eye(sigma_x.size(0), device=mu_x.device, dtype=mu_x.dtype) * epsilon
|
| 101 |
+
covmean = _sqrtm((sigma_x + offset).mm(sigma_y + offset))
|
| 102 |
+
|
| 103 |
+
# Numerical error might give slight imaginary component
|
| 104 |
+
if torch.is_complex(covmean):
|
| 105 |
+
if not torch.allclose(torch.diagonal(covmean).imag, torch.tensor(0, dtype=torch.double), atol=1e-3):
|
| 106 |
+
raise ValueError(f"Imaginary component {torch.max(torch.abs(covmean.imag))} too high.")
|
| 107 |
+
covmean = covmean.real
|
| 108 |
+
|
| 109 |
+
tr_covmean = torch.trace(covmean)
|
| 110 |
+
return diff.dot(diff) + torch.trace(sigma_x) + torch.trace(sigma_y) - 2 * tr_covmean
|
source_code/SegMamba/monai/metrics/froc.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from typing import Any, cast
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
import torch
|
| 18 |
+
|
| 19 |
+
from monai.config import NdarrayOrTensor
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def compute_fp_tp_probs_nd(
|
| 23 |
+
probs: NdarrayOrTensor,
|
| 24 |
+
coords: NdarrayOrTensor,
|
| 25 |
+
evaluation_mask: NdarrayOrTensor,
|
| 26 |
+
labels_to_exclude: list | None = None,
|
| 27 |
+
) -> tuple[NdarrayOrTensor, NdarrayOrTensor, int]:
|
| 28 |
+
"""
|
| 29 |
+
This function is modified from the official evaluation code of
|
| 30 |
+
`CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to distinguish
|
| 31 |
+
true positive and false positive predictions. A true positive prediction is defined when
|
| 32 |
+
the detection point is within the annotated ground truth region.
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
probs: an array with shape (n,) that represents the probabilities of the detections.
|
| 36 |
+
Where, n is the number of predicted detections.
|
| 37 |
+
coords: an array with shape (n, n_dim) that represents the coordinates of the detections.
|
| 38 |
+
The dimensions must be in the same order as in `evaluation_mask`.
|
| 39 |
+
evaluation_mask: the ground truth mask for evaluation.
|
| 40 |
+
labels_to_exclude: labels in this list will not be counted for metric calculation.
|
| 41 |
+
|
| 42 |
+
Returns:
|
| 43 |
+
fp_probs: an array that contains the probabilities of the false positive detections.
|
| 44 |
+
tp_probs: an array that contains the probabilities of the True positive detections.
|
| 45 |
+
num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation.
|
| 46 |
+
|
| 47 |
+
"""
|
| 48 |
+
if not (len(probs) == len(coords)):
|
| 49 |
+
raise ValueError(f"the length of probs {probs.shape}, should be the same as of coords {coords.shape}.")
|
| 50 |
+
if not (len(coords.shape) > 1 and coords.shape[1] == len(evaluation_mask.shape)):
|
| 51 |
+
raise ValueError(
|
| 52 |
+
f"coords {coords.shape} need to represent the same number of dimensions as mask {evaluation_mask.shape}."
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
if isinstance(probs, torch.Tensor):
|
| 56 |
+
probs = probs.detach().cpu().numpy()
|
| 57 |
+
if isinstance(coords, torch.Tensor):
|
| 58 |
+
coords = coords.detach().cpu().numpy()
|
| 59 |
+
if isinstance(evaluation_mask, torch.Tensor):
|
| 60 |
+
evaluation_mask = evaluation_mask.detach().cpu().numpy()
|
| 61 |
+
|
| 62 |
+
if labels_to_exclude is None:
|
| 63 |
+
labels_to_exclude = []
|
| 64 |
+
|
| 65 |
+
max_label = np.max(evaluation_mask)
|
| 66 |
+
tp_probs = np.zeros((max_label,), dtype=np.float32)
|
| 67 |
+
|
| 68 |
+
hittedlabel = evaluation_mask[tuple(coords.T)]
|
| 69 |
+
fp_probs = probs[np.where(hittedlabel == 0)]
|
| 70 |
+
for i in range(1, max_label + 1):
|
| 71 |
+
if i not in labels_to_exclude and i in hittedlabel:
|
| 72 |
+
tp_probs[i - 1] = probs[np.where(hittedlabel == i)].max()
|
| 73 |
+
|
| 74 |
+
num_targets = max_label - len(labels_to_exclude)
|
| 75 |
+
return fp_probs, tp_probs, cast(int, num_targets)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def compute_fp_tp_probs(
|
| 79 |
+
probs: NdarrayOrTensor,
|
| 80 |
+
y_coord: NdarrayOrTensor,
|
| 81 |
+
x_coord: NdarrayOrTensor,
|
| 82 |
+
evaluation_mask: NdarrayOrTensor,
|
| 83 |
+
labels_to_exclude: list | None = None,
|
| 84 |
+
resolution_level: int = 0,
|
| 85 |
+
) -> tuple[NdarrayOrTensor, NdarrayOrTensor, int]:
|
| 86 |
+
"""
|
| 87 |
+
This function is modified from the official evaluation code of
|
| 88 |
+
`CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to distinguish
|
| 89 |
+
true positive and false positive predictions. A true positive prediction is defined when
|
| 90 |
+
the detection point is within the annotated ground truth region.
|
| 91 |
+
|
| 92 |
+
Args:
|
| 93 |
+
probs: an array with shape (n,) that represents the probabilities of the detections.
|
| 94 |
+
Where, n is the number of predicted detections.
|
| 95 |
+
y_coord: an array with shape (n,) that represents the Y-coordinates of the detections.
|
| 96 |
+
x_coord: an array with shape (n,) that represents the X-coordinates of the detections.
|
| 97 |
+
evaluation_mask: the ground truth mask for evaluation.
|
| 98 |
+
labels_to_exclude: labels in this list will not be counted for metric calculation.
|
| 99 |
+
resolution_level: the level at which the evaluation mask is made.
|
| 100 |
+
|
| 101 |
+
Returns:
|
| 102 |
+
fp_probs: an array that contains the probabilities of the false positive detections.
|
| 103 |
+
tp_probs: an array that contains the probabilities of the True positive detections.
|
| 104 |
+
num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation.
|
| 105 |
+
|
| 106 |
+
"""
|
| 107 |
+
if isinstance(y_coord, torch.Tensor):
|
| 108 |
+
y_coord = y_coord.detach().cpu().numpy()
|
| 109 |
+
if isinstance(x_coord, torch.Tensor):
|
| 110 |
+
x_coord = x_coord.detach().cpu().numpy()
|
| 111 |
+
|
| 112 |
+
y_coord = (y_coord / pow(2, resolution_level)).astype(int)
|
| 113 |
+
x_coord = (x_coord / pow(2, resolution_level)).astype(int)
|
| 114 |
+
|
| 115 |
+
stacked = np.stack([y_coord, x_coord], axis=1)
|
| 116 |
+
|
| 117 |
+
return compute_fp_tp_probs_nd(
|
| 118 |
+
probs=probs, coords=stacked, evaluation_mask=evaluation_mask, labels_to_exclude=labels_to_exclude
|
| 119 |
+
)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def compute_froc_curve_data(
|
| 123 |
+
fp_probs: np.ndarray | torch.Tensor, tp_probs: np.ndarray | torch.Tensor, num_targets: int, num_images: int
|
| 124 |
+
) -> tuple[np.ndarray, np.ndarray]:
|
| 125 |
+
"""
|
| 126 |
+
This function is modified from the official evaluation code of
|
| 127 |
+
`CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to compute
|
| 128 |
+
the required data for plotting the Free Response Operating Characteristic (FROC) curve.
|
| 129 |
+
|
| 130 |
+
Args:
|
| 131 |
+
fp_probs: an array that contains the probabilities of the false positive detections for all
|
| 132 |
+
images under evaluation.
|
| 133 |
+
tp_probs: an array that contains the probabilities of the True positive detections for all
|
| 134 |
+
images under evaluation.
|
| 135 |
+
num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation.
|
| 136 |
+
num_images: the number of images under evaluation.
|
| 137 |
+
|
| 138 |
+
"""
|
| 139 |
+
if not isinstance(fp_probs, type(tp_probs)):
|
| 140 |
+
raise AssertionError("fp and tp probs should have same type.")
|
| 141 |
+
if isinstance(fp_probs, torch.Tensor):
|
| 142 |
+
fp_probs = fp_probs.detach().cpu().numpy()
|
| 143 |
+
if isinstance(tp_probs, torch.Tensor):
|
| 144 |
+
tp_probs = tp_probs.detach().cpu().numpy()
|
| 145 |
+
|
| 146 |
+
total_fps, total_tps = [], []
|
| 147 |
+
all_probs = sorted(set(list(fp_probs) + list(tp_probs)))
|
| 148 |
+
for thresh in all_probs[1:]:
|
| 149 |
+
total_fps.append((fp_probs >= thresh).sum())
|
| 150 |
+
total_tps.append((tp_probs >= thresh).sum())
|
| 151 |
+
total_fps.append(0)
|
| 152 |
+
total_tps.append(0)
|
| 153 |
+
fps_per_image = np.asarray(total_fps) / float(num_images)
|
| 154 |
+
total_sensitivity = np.asarray(total_tps) / float(num_targets)
|
| 155 |
+
return fps_per_image, total_sensitivity
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def compute_froc_score(
|
| 159 |
+
fps_per_image: np.ndarray, total_sensitivity: np.ndarray, eval_thresholds: tuple = (0.25, 0.5, 1, 2, 4, 8)
|
| 160 |
+
) -> Any:
|
| 161 |
+
"""
|
| 162 |
+
This function is modified from the official evaluation code of
|
| 163 |
+
`CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to compute
|
| 164 |
+
the challenge's second evaluation metric, which is defined as the average sensitivity at
|
| 165 |
+
the predefined false positive rates per whole slide image.
|
| 166 |
+
|
| 167 |
+
Args:
|
| 168 |
+
fps_per_image: the average number of false positives per image for different thresholds.
|
| 169 |
+
total_sensitivity: sensitivities (true positive rates) for different thresholds.
|
| 170 |
+
eval_thresholds: the false positive rates for calculating the average sensitivity. Defaults
|
| 171 |
+
to (0.25, 0.5, 1, 2, 4, 8) which is the same as the CAMELYON 16 Challenge.
|
| 172 |
+
|
| 173 |
+
"""
|
| 174 |
+
interp_sens = np.interp(eval_thresholds, fps_per_image[::-1], total_sensitivity[::-1])
|
| 175 |
+
return np.mean(interp_sens)
|
source_code/SegMamba/monai/metrics/hausdorff_distance.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Sequence
|
| 15 |
+
from typing import Any
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
import torch
|
| 19 |
+
|
| 20 |
+
from monai.metrics.utils import (
|
| 21 |
+
do_metric_reduction,
|
| 22 |
+
get_edge_surface_distance,
|
| 23 |
+
get_surface_distance,
|
| 24 |
+
ignore_background,
|
| 25 |
+
prepare_spacing,
|
| 26 |
+
)
|
| 27 |
+
from monai.utils import MetricReduction, convert_data_type, deprecated
|
| 28 |
+
|
| 29 |
+
from .metric import CumulativeIterationMetric
|
| 30 |
+
|
| 31 |
+
__all__ = ["HausdorffDistanceMetric", "compute_hausdorff_distance", "compute_percent_hausdorff_distance"]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class HausdorffDistanceMetric(CumulativeIterationMetric):
|
| 35 |
+
"""
|
| 36 |
+
Compute Hausdorff Distance between two tensors. It can support both multi-classes and multi-labels tasks.
|
| 37 |
+
It supports both directed and non-directed Hausdorff distance calculation. In addition, specify the `percentile`
|
| 38 |
+
parameter can get the percentile of the distance. Input `y_pred` is compared with ground truth `y`.
|
| 39 |
+
`y_preds` is expected to have binarized predictions and `y` should be in one-hot format.
|
| 40 |
+
You can use suitable transforms in ``monai.transforms.post`` first to achieve binarized values.
|
| 41 |
+
`y_preds` and `y` can be a list of channel-first Tensor (CHW[D]) or a batch-first Tensor (BCHW[D]).
|
| 42 |
+
The implementation refers to `DeepMind's implementation <https://github.com/deepmind/surface-distance>`_.
|
| 43 |
+
|
| 44 |
+
Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.
|
| 45 |
+
|
| 46 |
+
Args:
|
| 47 |
+
include_background: whether to include distance computation on the first channel of
|
| 48 |
+
the predicted output. Defaults to ``False``.
|
| 49 |
+
distance_metric: : [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
|
| 50 |
+
the metric used to compute surface distance. Defaults to ``"euclidean"``.
|
| 51 |
+
percentile: an optional float number between 0 and 100. If specified, the corresponding
|
| 52 |
+
percentile of the Hausdorff Distance rather than the maximum result will be achieved.
|
| 53 |
+
Defaults to ``None``.
|
| 54 |
+
directed: whether to calculate directed Hausdorff distance. Defaults to ``False``.
|
| 55 |
+
reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
|
| 56 |
+
available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
|
| 57 |
+
``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
|
| 58 |
+
get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
|
| 59 |
+
Here `not_nans` count the number of not nans for the metric, thus its shape equals to the shape of the metric.
|
| 60 |
+
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
def __init__(
    self,
    include_background: bool = False,
    distance_metric: str = "euclidean",
    percentile: float | None = None,
    directed: bool = False,
    reduction: MetricReduction | str = MetricReduction.MEAN,
    get_not_nans: bool = False,
) -> None:
    """Store the Hausdorff-distance configuration and initialise the cumulative buffers."""
    super().__init__()
    # keep every option on the instance so _compute_tensor/aggregate can read them later
    self.get_not_nans = get_not_nans
    self.reduction = reduction
    self.directed = directed
    self.percentile = percentile
    self.distance_metric = distance_metric
    self.include_background = include_background
|
| 79 |
+
|
| 80 |
+
def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor, **kwargs: Any) -> torch.Tensor:  # type: ignore[override]
    """
    Compute the per-image, per-channel Hausdorff distance for one iteration.

    Args:
        y_pred: binarized, one-hot model output with a leading batch dimension,
            e.g. shape [16, 3, 32, 32].
        y: binarized, one-hot ground truth with a leading batch dimension.
        kwargs: extra options; ``spacing`` (pixel/voxel spacing) is forwarded to
            the distance computation and is only relevant when
            ``distance_metric`` is ``"euclidean"``. It may be a single number
            (isotropic for the whole batch), a sequence matching the image
            dimensions (shared by the batch), or a sequence of per-image
            sequences (outer length equal to the batch size). ``None`` means
            unit spacing.

    Raises:
        ValueError: when `y_pred` has less than three dimensions.
    """
    if y_pred.ndimension() < 3:
        raise ValueError("y_pred should have at least three dimensions.")

    # delegate to the functional implementation; result has shape (batch, channels)
    return compute_hausdorff_distance(
        y_pred=y_pred,
        y=y,
        include_background=self.include_background,
        distance_metric=self.distance_metric,
        percentile=self.percentile,
        directed=self.directed,
        spacing=kwargs.get("spacing"),
    )
|
| 116 |
+
|
| 117 |
+
def aggregate(
    self, reduction: MetricReduction | str | None = None
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
    """
    Reduce the buffered per-iteration results of `compute_hausdorff_distance`.

    Args:
        reduction: reduction mode applied over the `not-nan` values; one of
            {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}. Falls back to `self.reduction`
            when not given; ``"none"`` skips reduction.
    """
    buffered = self.get_buffer()
    if not isinstance(buffered, torch.Tensor):
        raise ValueError("the data to aggregate must be PyTorch Tensor.")

    # reduce over not-nan entries only; `not_nans` counts the valid values
    reduced, not_nans = do_metric_reduction(buffered, reduction or self.reduction)
    return (reduced, not_nans) if self.get_not_nans else reduced
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def compute_hausdorff_distance(
    y_pred: np.ndarray | torch.Tensor,
    y: np.ndarray | torch.Tensor,
    include_background: bool = False,
    distance_metric: str = "euclidean",
    percentile: float | None = None,
    directed: bool = False,
    spacing: int | float | np.ndarray | Sequence[int | float | np.ndarray | Sequence[int | float]] | None = None,
) -> torch.Tensor:
    """
    Compute the (percentile) Hausdorff distance between binarized segmentations.

    Args:
        y_pred: typical segmentation model output in one-hot format with a leading
            batch dimension, e.g. shape [16, 3, 32, 32]; values should be binarized.
        y: ground truth in one-hot format with a leading batch dimension; values
            should be binarized.
        include_background: whether to include distance computation on the first
            channel of the predicted output. Defaults to ``False``.
        distance_metric: one of [``"euclidean"``, ``"chessboard"``, ``"taxicab"``];
            the metric used to compute surface distance. Defaults to ``"euclidean"``.
        percentile: optional float in [0, 100]; when given, that percentile of the
            distances is reported instead of the maximum. Defaults to ``None``.
        directed: whether to calculate directed Hausdorff distance. Defaults to ``False``.
        spacing: pixel/voxel spacing, only relevant for ``"euclidean"``. A single
            number means isotropic spacing for the whole batch; a sequence must match
            the image dimensions and is shared by the batch; a sequence of sequences
            gives per-image spacing (outer length equal to the batch size, inner
            length 1 for isotropic or equal to the image dimensions). ``None`` means
            unit spacing. Defaults to ``None``.
    """
    if not include_background:
        y_pred, y = ignore_background(y_pred=y_pred, y=y)
    y_pred = convert_data_type(y_pred, output_type=torch.Tensor, dtype=torch.float)[0]
    y = convert_data_type(y, output_type=torch.Tensor, dtype=torch.float)[0]

    if y.shape != y_pred.shape:
        raise ValueError(f"y_pred and y should have same shapes, got {y_pred.shape} and {y.shape}.")

    batch_size, n_class = y_pred.shape[:2]
    hd = torch.empty((batch_size, n_class), dtype=torch.float, device=y_pred.device)

    # normalise the spacing argument into one entry per batch element
    img_dim = y_pred.ndim - 2
    spacing_list = prepare_spacing(spacing=spacing, batch_size=batch_size, img_dim=img_dim)

    for b in range(batch_size):
        for c in range(n_class):
            _, distances, _ = get_edge_surface_distance(
                y_pred[b, c],
                y[b, c],
                distance_metric=distance_metric,
                spacing=spacing_list[b],
                symmetric=not directed,
                class_index=c,
            )
            # one distance tensor per direction; the Hausdorff value is their maximum
            per_direction = [_compute_percentile_hausdorff_distance(d, percentile) for d in distances]
            hd[b, c] = torch.max(torch.stack(per_direction))
    return hd
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def _compute_percentile_hausdorff_distance(
|
| 203 |
+
surface_distance: torch.Tensor, percentile: float | None = None
|
| 204 |
+
) -> torch.Tensor:
|
| 205 |
+
"""
|
| 206 |
+
This function is used to compute the Hausdorff distance.
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
# for both pred and gt do not have foreground
|
| 210 |
+
if surface_distance.shape == (0,):
|
| 211 |
+
return torch.tensor(np.nan, dtype=torch.float, device=surface_distance.device)
|
| 212 |
+
|
| 213 |
+
if not percentile:
|
| 214 |
+
return surface_distance.max()
|
| 215 |
+
|
| 216 |
+
if 0 <= percentile <= 100:
|
| 217 |
+
return torch.quantile(surface_distance, percentile / 100)
|
| 218 |
+
raise ValueError(f"percentile should be a value between 0 and 100, get {percentile}.")
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
@deprecated(since="1.3.0", removed="1.5.0")
def compute_percent_hausdorff_distance(
    edges_pred: np.ndarray,
    edges_gt: np.ndarray,
    distance_metric: str = "euclidean",
    percentile: float | None = None,
    spacing: int | float | np.ndarray | Sequence[int | float] | None = None,
) -> float:
    """
    Compute the directed (percentile) Hausdorff distance between two edge maps.

    Deprecated; kept for backward compatibility only.
    """
    surface_distance: np.ndarray = get_surface_distance(  # type: ignore
        edges_pred, edges_gt, distance_metric=distance_metric, spacing=spacing
    )

    # neither prediction nor ground truth contains foreground
    if surface_distance.shape == (0,):
        return np.nan

    # NOTE: a falsy percentile (None or 0) means the classic maximum is returned
    if not percentile:
        return surface_distance.max()  # type: ignore[no-any-return]
    if not 0 <= percentile <= 100:
        raise ValueError(f"percentile should be a value between 0 and 100, get {percentile}.")
    return np.percentile(surface_distance, percentile)  # type: ignore[no-any-return]
|
source_code/SegMamba/monai/metrics/loss_metric.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from typing import Any
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
from torch.nn.modules.loss import _Loss
|
| 18 |
+
|
| 19 |
+
from monai.metrics.utils import do_metric_reduction
|
| 20 |
+
from monai.utils import MetricReduction
|
| 21 |
+
|
| 22 |
+
from ..config import TensorOrList
|
| 23 |
+
from .metric import CumulativeIterationMetric
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class LossMetric(CumulativeIterationMetric):
    """
    Expose a ``loss_fn`` as a cumulative metric: loss values computed on mini-batches
    are buffered across iterations and combined via ``reduction`` into a single
    quantitative measurement of a model.

    Example:

        .. code-block:: python

            import torch
            from monai.losses import DiceLoss
            from monai.metrics import LossMetric

            dice_loss = DiceLoss(include_background=True)
            loss_metric = LossMetric(loss_fn=dice_loss)

            # first iteration
            y_pred = torch.tensor([[[[1.0, 0.0], [0.0, 1.0]]]])  # shape [batch=1, channel=1, 2, 2]
            y = torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])  # shape [batch=1, channel=1, 2, 2]
            loss_metric(y_pred, y)

            # second iteration
            y_pred = torch.tensor([[[[1.0, 0.0], [0.0, 0.0]]]])  # shape [batch=1, channel=1, 2, 2]
            y = torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])  # shape [batch=1, channel=1, 2, 2]
            loss_metric(y_pred, y)

            # aggregate
            print(loss_metric.aggregate(reduction="none"))  # tensor([[0.2000], [0.5000]]) (shape [batch=2, channel=1])

            # reset
            loss_metric.reset()
            print(loss_metric.aggregate())

    Args:
        loss_fn: a callable that takes ``y_pred`` and optionally ``y`` (both
            batch-first) and returns a batch-first tensor of loss values.
        reduction: reduction mode applied over the `not-nan` values; one of
            {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default ``"mean"``;
            ``"none"`` skips reduction.
        get_not_nans: whether aggregate() returns (metric, not_nans); `not_nans`
            counts the non-NaN values and has the same shape as the metric.
    """

    def __init__(
        self, loss_fn: _Loss, reduction: MetricReduction | str = MetricReduction.MEAN, get_not_nans: bool = False
    ) -> None:
        super().__init__()
        self.loss_fn = loss_fn
        self.reduction = reduction
        self.get_not_nans = get_not_nans

    def aggregate(
        self, reduction: MetricReduction | str | None = None
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """
        Return the loss value aggregated across all buffered iterations.

        Args:
            reduction: reduction mode applied over the `not-nan` values; one of
                {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default `self.reduction`;
                ``"none"`` skips reduction.
        """
        buffered = self.get_buffer()
        if buffered is None:
            # nothing collected yet: report zero rather than failing
            zero = torch.tensor(0.0)
            return (zero, torch.tensor(0.0)) if self.get_not_nans else zero
        reduced, not_nans = do_metric_reduction(buffered, reduction or self.reduction)
        return (reduced, not_nans) if self.get_not_nans else reduced

    def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor | None = None, **kwargs: Any) -> TensorOrList:
        """
        Evaluate ``loss_fn`` on one iteration; `y_pred` (and `y` when given) are
        batch-first tensors (BC[HWD]).

        Returns:
            a tensor with shape (BC[HWD]), or a list of tensors, each with shape (C[HWD]).
        """
        iter_loss: TensorOrList = self.loss_fn(y_pred) if y is None else self.loss_fn(y_pred, y)
        if isinstance(iter_loss, torch.Tensor):
            # `Cumulative` requires at least a batch dim and
            # `do_metric_reduction` at least batch + channel dims
            while iter_loss.dim() < 2:
                iter_loss = iter_loss[None]
        return iter_loss
|
source_code/SegMamba/monai/metrics/meandice.py
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
|
| 16 |
+
from monai.metrics.utils import do_metric_reduction
|
| 17 |
+
from monai.utils import MetricReduction
|
| 18 |
+
|
| 19 |
+
from .metric import CumulativeIterationMetric
|
| 20 |
+
|
| 21 |
+
__all__ = ["DiceMetric", "compute_dice", "DiceHelper"]
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class DiceMetric(CumulativeIterationMetric):
    """
    Average Dice score over pairs of prediction/ground-truth segmentations.

    Supports multi-class and multi-label tasks. `y_pred` is compared with `y`;
    `y_pred` must hold binarized predictions while `y` may be single-channel class
    indices or one-hot. Set ``include_background=False`` to drop channel 0 (by
    convention the background), which otherwise can dominate small foreground
    structures. `y_preds` and `y` can be a list of channel-first Tensors (CHW[D])
    or a batch-first Tensor (BCHW[D]); `y` can also be `B1HW[D]`.

    Example of the typical execution steps of this metric class follows
    :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        include_background: whether to include Dice computation on the first
            channel of the predicted output. Defaults to ``True``.
        reduction: reduction mode applied over the `not-nan` values; one of
            {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default ``"mean"``;
            ``"none"`` skips reduction.
        get_not_nans: whether aggregate() returns (metric, not_nans); `not_nans`
            counts the non-NaN values and has the same shape as the metric.
        ignore_empty: if `True`, NaN is set for empty ground truth cases; if
            `False`, 1 is set when the prediction for an empty case is also empty.
        num_classes: number of input channels (including background). When None,
            ``y_pred.shape[1]`` is used; useful when both inputs are
            single-channel class indices.
        return_with_label: only effective when reduction is "mean_batch". If
            `True`, results are keyed "label_{index}" (starting at 0 when
            'include_background' is True, else 1); a list of names may be given
            instead. The outcome is then returned as a dictionary.
    """

    def __init__(
        self,
        include_background: bool = True,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        get_not_nans: bool = False,
        ignore_empty: bool = True,
        num_classes: int | None = None,
        return_with_label: bool | list[str] = False,
    ) -> None:
        super().__init__()
        self.include_background = include_background
        self.reduction = reduction
        self.get_not_nans = get_not_nans
        self.ignore_empty = ignore_empty
        self.num_classes = num_classes
        self.return_with_label = return_with_label
        # per-channel raw scores are produced here; reduction happens in `aggregate`
        self.dice_helper = DiceHelper(
            include_background=include_background,
            reduction=MetricReduction.NONE,
            get_not_nans=False,
            softmax=False,
            ignore_empty=ignore_empty,
            num_classes=num_classes,
        )

    def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:  # type: ignore[override]
        """
        Args:
            y_pred: typical segmentation model output, one-hot with a leading batch
                dimension (e.g. [16, 3, 32, 32]); values should be binarized.
            y: ground truth; single-channel class indices or one-hot.

        Raises:
            ValueError: when `y_pred` has less than three dimensions.
        """
        dims = y_pred.ndimension()
        if dims < 3:
            raise ValueError(f"y_pred should have at least 3 dimensions (batch, channel, spatial), got {dims}.")
        # Dice per batch element and per channel (BxC)
        return self.dice_helper(y_pred=y_pred, y=y)  # type: ignore

    def aggregate(
        self, reduction: MetricReduction | str | None = None
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """
        Reduce and aggregate the buffered Dice scores.

        Args:
            reduction: reduction mode applied over the `not-nan` values; one of
                {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default `self.reduction`;
                ``"none"`` skips reduction.
        """
        data = self.get_buffer()
        if not isinstance(data, torch.Tensor):
            raise ValueError(f"the data to aggregate must be PyTorch Tensor, got {type(data)}.")

        f, not_nans = do_metric_reduction(data, reduction or self.reduction)
        if self.return_with_label and self.reduction == MetricReduction.MEAN_BATCH:
            labelled = {}
            if isinstance(self.return_with_label, bool):
                # auto-generated keys: background shifts the first index to 1
                offset = 0 if self.include_background else 1
                for idx, score in enumerate(f):
                    labelled[f"label_{idx + offset}"] = round(score.item(), 4)
            else:
                for name, score in zip(self.return_with_label, f):
                    labelled[name] = round(score.item(), 4)
            f = labelled
        return (f, not_nans) if self.get_not_nans else f
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def compute_dice(
    y_pred: torch.Tensor,
    y: torch.Tensor,
    include_background: bool = True,
    ignore_empty: bool = True,
    num_classes: int | None = None,
) -> torch.Tensor:
    """Computes Dice score metric for a batch of predictions.

    Args:
        y_pred: typical segmentation model output; single-channel class indices
            or one-hot.
        y: ground truth; single-channel class indices or one-hot.
        include_background: whether to include Dice computation on the first
            channel of the predicted output. Defaults to True.
        ignore_empty: if `True`, NaN is set for empty ground truth cases; if
            `False`, 1 is set when the prediction for an empty case is also empty.
        num_classes: number of input channels (including background). When None,
            ``y_pred.shape[1]`` is used; useful when both inputs are
            single-channel class indices.

    Returns:
        Dice scores per batch and per class, (shape: [batch_size, num_classes]).
    """
    # configure a one-shot helper with no reduction so raw per-class scores come back
    helper = DiceHelper(  # type: ignore
        include_background=include_background,
        reduction=MetricReduction.NONE,
        get_not_nans=False,
        softmax=False,
        ignore_empty=ignore_empty,
        num_classes=num_classes,
    )
    return helper(y_pred=y_pred, y=y)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class DiceHelper:
    """
    Compute the Dice score between two tensors `y_pred` and `y`, each given either
    as single-channel class indices or in one-hot format.

    Example:

        .. code-block:: python

            import torch
            from monai.metrics import DiceHelper

            n_classes, batch_size = 5, 16
            spatial_shape = (128, 128, 128)

            y_pred = torch.rand(batch_size, n_classes, *spatial_shape).float()  # predictions
            y = torch.randint(0, n_classes, size=(batch_size, 1, *spatial_shape)).long()  # ground truth

            score, not_nans = DiceHelper(include_background=False, sigmoid=True, softmax=True)(y_pred, y)
            print(score, not_nans)

    """

    def __init__(
        self,
        include_background: bool | None = None,
        sigmoid: bool = False,
        softmax: bool | None = None,
        activate: bool = False,
        get_not_nans: bool = True,
        reduction: MetricReduction | str = MetricReduction.MEAN_BATCH,
        ignore_empty: bool = True,
        num_classes: int | None = None,
    ) -> None:
        """
        Args:
            include_background: whether to score the first channel
                (defaults to the value of `sigmoid`, False).
            sigmoid: whether ``y_pred`` are/will be sigmoid activated outputs; if
                True, a 0.5 threshold discretizes the prediction. Defaults to False.
            softmax: whether ``y_pred`` are softmax activated outputs; if True,
                `argmax` discretizes the prediction. Defaults to ``not sigmoid``.
            activate: whether to apply sigmoid to ``y_pred`` first; only valid
                when ``sigmoid`` is True. Defaults to False.
            get_not_nans: whether to return the number of not-nan values.
            reduction: define mode of reduction to the metrics
            ignore_empty: if `True`, NaN is set for empty ground truth cases;
                if `False`, 1 is set when the union of ``y_pred`` and ``y`` is empty.
            num_classes: number of input channels (including background). When
                None, ``y_pred.shape[1]`` is used; useful when both inputs are
                single-channel class indices.
        """
        self.sigmoid = sigmoid
        self.reduction = reduction
        self.get_not_nans = get_not_nans
        # unset options fall back to values derived from `sigmoid`
        self.include_background = sigmoid if include_background is None else include_background
        self.softmax = not sigmoid if softmax is None else softmax
        self.activate = activate
        self.ignore_empty = ignore_empty
        self.num_classes = num_classes

    def compute_channel(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Dice of one binary channel pair, with the configured empty-ground-truth convention."""
        y_o = torch.sum(y)
        if y_o > 0:
            # 2 * |intersection| / (|y| + |y_pred|)
            return (2.0 * torch.sum(torch.masked_select(y, y_pred))) / (y_o + torch.sum(y_pred))
        if self.ignore_empty:
            return torch.tensor(float("nan"), device=y_o.device)
        # empty ground truth: perfect (1) only when the prediction is empty too
        denorm = y_o + torch.sum(y_pred)
        if denorm <= 0:
            return torch.tensor(1.0, device=y_o.device)
        return torch.tensor(0.0, device=y_o.device)

    def __call__(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            y_pred: input predictions with shape (batch_size, num_classes or 1, spatial_dims...);
                the number of channels is inferred from ``y_pred.shape[1]`` when ``num_classes is None``.
            y: ground truth with shape (batch_size, num_classes or 1, spatial_dims...).
        """
        use_softmax, use_sigmoid = self.softmax, self.sigmoid
        if self.num_classes is None:
            # y_pred is one-hot or multi-channel scores
            n_pred_ch = y_pred.shape[1]
        else:
            n_pred_ch = self.num_classes
            if y_pred.shape[1] == 1 and self.num_classes > 1:
                # y_pred already holds class indices: skip activation/discretization
                use_softmax = use_sigmoid = False

        if use_softmax:
            if n_pred_ch > 1:
                y_pred = torch.argmax(y_pred, dim=1, keepdim=True)
        elif use_sigmoid:
            if self.activate:
                y_pred = torch.sigmoid(y_pred)
            y_pred = y_pred > 0.5

        start_ch = 0 if self.include_background else 1
        channel_ids = range(start_ch, n_pred_ch) if n_pred_ch > 1 else [1]
        batch_scores = []
        for b in range(y_pred.shape[0]):
            per_channel = []
            for c in channel_ids:
                x_pred = (y_pred[b, 0] == c) if (y_pred.shape[1] == 1) else y_pred[b, c].bool()
                x = (y[b, 0] == c) if (y.shape[1] == 1) else y[b, c]
                per_channel.append(self.compute_channel(x_pred, x))
            batch_scores.append(torch.stack(per_channel))
        data = torch.stack(batch_scores, dim=0).contiguous()  # type: ignore

        f, not_nans = do_metric_reduction(data, self.reduction)  # type: ignore
        return (f, not_nans) if self.get_not_nans else f
|
source_code/SegMamba/monai/metrics/mmd.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Callable
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
|
| 18 |
+
from monai.metrics.metric import Metric
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class MMDMetric(Metric):
    """
    Kernel-based two-sample metric: the unbiased Maximum Mean Discrepancy (MMD) between the
    distributions underlying two batches of samples. The value is non-negative, and a smaller
    value indicates that the two distributions are more alike.

    Gretton, A., et al,, 2012. A kernel two-sample test. The Journal of Machine Learning Research, 13(1), pp.723-773.

    Args:
        y_mapping: Callable applied to both inputs before the metric is evaluated. Typically a
            Gaussian or Laplace filter, but any tensor-in/tensor-out function works, such as a
            feature extractor, an identity, or ``y_mapping = lambda x: x.square()``.
    """

    def __init__(self, y_mapping: Callable | None = None) -> None:
        super().__init__()
        self.y_mapping = y_mapping

    def __call__(self, y: torch.Tensor, y_pred: torch.Tensor) -> torch.Tensor:
        # delegate to the functional implementation
        return compute_mmd(y, y_pred, self.y_mapping)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def compute_mmd(y: torch.Tensor, y_pred: torch.Tensor, y_mapping: Callable | None) -> torch.Tensor:
|
| 44 |
+
"""
|
| 45 |
+
Args:
|
| 46 |
+
y: first sample (e.g., the reference image). Its shape is (B,C,W,H) for 2D data and (B,C,W,H,D) for 3D.
|
| 47 |
+
y_pred: second sample (e.g., the reconstructed image). It has similar shape as y.
|
| 48 |
+
y_mapping: Callable to transform the y tensors before computing the metric.
|
| 49 |
+
"""
|
| 50 |
+
if y_pred.shape[0] == 1 or y.shape[0] == 1:
|
| 51 |
+
raise ValueError("MMD metric requires at least two samples in y and y_pred.")
|
| 52 |
+
|
| 53 |
+
if y_mapping is not None:
|
| 54 |
+
y = y_mapping(y)
|
| 55 |
+
y_pred = y_mapping(y_pred)
|
| 56 |
+
|
| 57 |
+
if y_pred.shape != y.shape:
|
| 58 |
+
raise ValueError(
|
| 59 |
+
"y_pred and y shapes dont match after being processed "
|
| 60 |
+
f"by their transforms, received y_pred: {y_pred.shape} and y: {y.shape}"
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
for d in range(len(y.shape) - 1, 1, -1):
|
| 64 |
+
y = y.squeeze(dim=d)
|
| 65 |
+
y_pred = y_pred.squeeze(dim=d)
|
| 66 |
+
|
| 67 |
+
y = y.view(y.shape[0], -1)
|
| 68 |
+
y_pred = y_pred.view(y_pred.shape[0], -1)
|
| 69 |
+
|
| 70 |
+
y_y = torch.mm(y, y.t())
|
| 71 |
+
y_pred_y_pred = torch.mm(y_pred, y_pred.t())
|
| 72 |
+
y_pred_y = torch.mm(y_pred, y.t())
|
| 73 |
+
|
| 74 |
+
m = y.shape[0]
|
| 75 |
+
n = y_pred.shape[0]
|
| 76 |
+
|
| 77 |
+
# Ref. 1 Eq. 3 (found under Lemma 6)
|
| 78 |
+
# term 1
|
| 79 |
+
c1 = 1 / (m * (m - 1))
|
| 80 |
+
a = torch.sum(y_y - torch.diag(torch.diagonal(y_y)))
|
| 81 |
+
|
| 82 |
+
# term 2
|
| 83 |
+
c2 = 1 / (n * (n - 1))
|
| 84 |
+
b = torch.sum(y_pred_y_pred - torch.diag(torch.diagonal(y_pred_y_pred)))
|
| 85 |
+
|
| 86 |
+
# term 3
|
| 87 |
+
c3 = 2 / (m * n)
|
| 88 |
+
c = torch.sum(y_pred_y)
|
| 89 |
+
|
| 90 |
+
mmd = c1 * a + c2 * b - c3 * c
|
| 91 |
+
return mmd
|
source_code/SegMamba/monai/metrics/panoptic_quality.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from collections.abc import Sequence
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
|
| 18 |
+
from monai.metrics.metric import CumulativeIterationMetric
|
| 19 |
+
from monai.metrics.utils import do_metric_reduction, remap_instance_id
|
| 20 |
+
from monai.utils import MetricReduction, ensure_tuple, optional_import
|
| 21 |
+
|
| 22 |
+
linear_sum_assignment, _ = optional_import("scipy.optimize", name="linear_sum_assignment")
|
| 23 |
+
|
| 24 |
+
__all__ = ["PanopticQualityMetric", "compute_panoptic_quality"]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class PanopticQualityMetric(CumulativeIterationMetric):
    """
    Compute Panoptic Quality between two instance segmentation masks. If specifying `metric_name` to "SQ" or "RQ",
    Segmentation Quality (SQ) or Recognition Quality (RQ) will be returned instead.

    Panoptic Quality is a metric used in panoptic segmentation tasks. This task unifies the typically distinct tasks
    of semantic segmentation (assign a class label to each pixel) and
    instance segmentation (detect and segment each object instance). Compared with semantic segmentation, panoptic
    segmentation distinguish different instances that belong to same class.
    Compared with instance segmentation, panoptic segmentation does not allow overlap and only one semantic label and
    one instance id can be assigned to each pixel.
    Please refer to the following paper for more details:
    https://openaccess.thecvf.com/content_CVPR_2019/papers/Kirillov_Panoptic_Segmentation_CVPR_2019_paper.pdf

    This class also refers to the following implementation:
    https://github.com/TissueImageAnalytics/CoNIC

    Args:
        num_classes: number of classes. The number should not count the background.
        metric_name: output metric. The value can be "pq", "sq" or "rq".
            Except for input only one metric, multiple metrics are also supported via input a sequence of metric names
            such as ("pq", "sq", "rq"). If input a sequence, a list of results with the same order
            as the input names will be returned.
        reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to `self.reduction`. if "none", will not do reduction.
        match_iou_threshold: IOU threshold to determine the pairing between `y_pred` and `y`. Usually,
            it should >= 0.5, the pairing between instances of `y_pred` and `y` are identical.
            If set `match_iou_threshold` < 0.5, this function uses Munkres assignment to find the
            maximal amount of unique pairing.
        smooth_numerator: a small constant added to the numerator to avoid zero.

    """

    def __init__(
        self,
        num_classes: int,
        metric_name: Sequence[str] | str = "pq",
        reduction: MetricReduction | str = MetricReduction.MEAN_BATCH,
        match_iou_threshold: float = 0.5,
        smooth_numerator: float = 1e-6,
    ) -> None:
        super().__init__()
        self.num_classes = num_classes
        self.reduction = reduction
        self.match_iou_threshold = match_iou_threshold
        self.smooth_numerator = smooth_numerator
        # normalize a single name to a one-element tuple so aggregate() can always iterate
        self.metric_name = ensure_tuple(metric_name)

    def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:  # type: ignore[override]
        """
        Args:
            y_pred: Predictions. It must be in the form of B2HW and have integer type. The first channel and the
                second channel represent the instance predictions and classification predictions respectively.
            y: ground truth. It must have the same shape as `y_pred` and have integer type. The first channel and the
                second channel represent the instance labels and classification labels respectively.
                Values in the second channel of `y_pred` and `y` should be in the range of 0 to `self.num_classes`,
                where 0 represents the background.

        Raises:
            ValueError: when `y_pred` and `y` have different shapes.
            ValueError: when `y_pred` and `y` have != 2 channels.
            ValueError: when `y_pred` and `y` have != 4 dimensions.

        """
        if y_pred.shape != y.shape:
            raise ValueError(f"y_pred and y should have same shapes, got {y_pred.shape} and {y.shape}.")

        if y_pred.shape[1] != 2:
            raise ValueError(
                f"for panoptic quality calculation, only 2 channels input is supported, got {y_pred.shape[1]}."
            )

        dims = y_pred.ndimension()
        if dims != 4:
            raise ValueError(f"y_pred should have 4 dimensions (batch, 2, h, w), got {dims}.")

        batch_size = y_pred.shape[0]

        # one row of (tp, fp, fn, iou_sum) per image and per class; reduced later in aggregate()
        outputs = torch.zeros([batch_size, self.num_classes, 4], device=y_pred.device)

        for b in range(batch_size):
            true_instance, pred_instance = y[b, 0], y_pred[b, 0]
            true_class, pred_class = y[b, 1], y_pred[b, 1]
            for c in range(self.num_classes):
                # restrict the instance maps to the current class (class ids are 1-based; 0 is background)
                pred_instance_c = (pred_class == c + 1) * pred_instance
                true_instance_c = (true_class == c + 1) * true_instance

                outputs[b, c] = compute_panoptic_quality(
                    pred=pred_instance_c,
                    gt=true_instance_c,
                    remap=True,
                    match_iou_threshold=self.match_iou_threshold,
                    output_confusion_matrix=True,
                )

        return outputs

    def aggregate(self, reduction: MetricReduction | str | None = None) -> torch.Tensor | list[torch.Tensor]:
        """
        Execute reduction logic for the output of `compute_panoptic_quality`.

        Args:
            reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
                available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default to `self.reduction`. if "none", will not do reduction.

        """
        data = self.get_buffer()
        if not isinstance(data, torch.Tensor):
            raise ValueError("the data to aggregate must be PyTorch Tensor.")

        # do metric reduction
        f, _ = do_metric_reduction(data, reduction or self.reduction)
        # unpack the reduced confusion-matrix components produced by _compute_tensor
        tp, fp, fn, iou_sum = f[..., 0], f[..., 1], f[..., 2], f[..., 3]
        results = []
        for metric_name in self.metric_name:
            metric_name = _check_panoptic_metric_name(metric_name)
            if metric_name == "rq":
                results.append(tp / (tp + 0.5 * fp + 0.5 * fn + self.smooth_numerator))
            elif metric_name == "sq":
                results.append(iou_sum / (tp + self.smooth_numerator))
            else:
                results.append(iou_sum / (tp + 0.5 * fp + 0.5 * fn + self.smooth_numerator))

        return results[0] if len(results) == 1 else results
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def compute_panoptic_quality(
    pred: torch.Tensor,
    gt: torch.Tensor,
    metric_name: str = "pq",
    remap: bool = True,
    match_iou_threshold: float = 0.5,
    smooth_numerator: float = 1e-6,
    output_confusion_matrix: bool = False,
) -> torch.Tensor:
    """Compute Panoptic Quality (PQ) for a single pair of instance maps; with `metric_name`
    set to "SQ" or "RQ", Segmentation Quality (SQ) or Recognition Quality (RQ) is returned.

    When `output_confusion_matrix` is True the function instead returns a length-4 tensor
    holding (true positives, false positives, false negatives, sum of matched IoUs), which
    lets callers accumulate across images before computing the final score.

    Args:
        pred: input data to compute, it must be in the form of HW and have integer type.
        gt: ground truth. It must have the same shape as `pred` and have integer type.
        metric_name: output metric. The value can be "pq", "sq" or "rq".
        remap: whether to remap `pred` and `gt` to ensure contiguous ordering of instance id.
        match_iou_threshold: IOU threshold to determine the pairing between `pred` and `gt`. Usually,
            it should >= 0.5, the pairing between instances of `pred` and `gt` are identical.
            If set `match_iou_threshold` < 0.5, this function uses Munkres assignment to find the
            maximal amount of unique pairing.
        smooth_numerator: a small constant added to the numerator to avoid zero.
        output_confusion_matrix: if True, return the raw (tp, fp, fn, iou_sum) tensor.

    Raises:
        ValueError: when `pred` and `gt` have different shapes.
        ValueError: when `match_iou_threshold` <= 0.0 or > 1.0.

    """

    if gt.shape != pred.shape:
        raise ValueError(f"pred and gt should have same shapes, got {pred.shape} and {gt.shape}.")
    if match_iou_threshold <= 0.0 or match_iou_threshold > 1.0:
        raise ValueError(f"'match_iou_threshold' should be within (0, 1], got: {match_iou_threshold}.")

    gt = gt.int()
    pred = pred.int()

    if remap is True:
        # contiguous 1..K ids are required by the pairwise-IoU helper below
        gt = remap_instance_id(gt)
        pred = remap_instance_id(pred)

    pairwise_iou, true_id_list, pred_id_list = _get_pairwise_iou(pred, gt, device=pred.device)
    paired_iou, paired_true, paired_pred = _get_paired_iou(
        pairwise_iou, match_iou_threshold, device=pairwise_iou.device
    )

    # instance ids (background excluded via [1:]) that found no counterpart
    missed_true = [inst_id for inst_id in true_id_list[1:] if inst_id not in paired_true]
    spurious_pred = [inst_id for inst_id in pred_id_list[1:] if inst_id not in paired_pred]

    tp = len(paired_true)
    fp = len(spurious_pred)
    fn = len(missed_true)
    iou_sum = paired_iou.sum()

    if output_confusion_matrix:
        return torch.as_tensor([tp, fp, fn, iou_sum], device=pred.device)

    metric_name = _check_panoptic_metric_name(metric_name)
    if metric_name == "rq":
        return torch.as_tensor(tp / (tp + 0.5 * fp + 0.5 * fn + smooth_numerator), device=pred.device)
    if metric_name == "sq":
        return torch.as_tensor(iou_sum / (tp + smooth_numerator), device=pred.device)
    return torch.as_tensor(iou_sum / (tp + 0.5 * fp + 0.5 * fn + smooth_numerator), device=pred.device)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def _get_id_list(gt: torch.Tensor) -> list[torch.Tensor]:
|
| 223 |
+
id_list = list(gt.unique())
|
| 224 |
+
# ensure id 0 is included
|
| 225 |
+
if 0 not in id_list:
|
| 226 |
+
id_list.insert(0, torch.tensor(0).int())
|
| 227 |
+
|
| 228 |
+
return id_list
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def _get_pairwise_iou(
    pred: torch.Tensor, gt: torch.Tensor, device: str | torch.device = "cpu"
) -> tuple[torch.Tensor, list[torch.Tensor], list[torch.Tensor]]:
    """Build the IoU matrix between every ground-truth instance and every predicted instance.

    Returns the (num_true, num_pred) IoU matrix (background id 0 excluded from both axes)
    together with the full id lists of ``gt`` and ``pred`` (each list starting with id 0).
    NOTE(review): the ``pred_masks[pred_id - 1]`` lookup below assumes instance ids are
    contiguous starting at 1 — guaranteed when the caller remaps ids first (``remap=True``
    in ``compute_panoptic_quality``); confirm before calling with raw ids.
    """
    pred_id_list = _get_id_list(pred)
    true_id_list = _get_id_list(gt)

    # row i / column j correspond to true id i+1 / pred id j+1 (id 0 is background)
    pairwise_iou = torch.zeros([len(true_id_list) - 1, len(pred_id_list) - 1], dtype=torch.float, device=device)
    true_masks: list[torch.Tensor] = []
    pred_masks: list[torch.Tensor] = []

    for t in true_id_list[1:]:
        t_mask = torch.as_tensor(gt == t, device=device).int()
        true_masks.append(t_mask)

    for p in pred_id_list[1:]:
        p_mask = torch.as_tensor(pred == p, device=device).int()
        pred_masks.append(p_mask)

    for true_id in range(1, len(true_id_list)):
        t_mask = true_masks[true_id - 1]
        # only predicted instances overlapping this true instance can have non-zero IoU
        pred_true_overlap = pred[t_mask > 0]
        pred_true_overlap_id = list(pred_true_overlap.unique())
        for pred_id in pred_true_overlap_id:
            if pred_id == 0:  # skip background
                continue
            p_mask = pred_masks[pred_id - 1]
            total = (t_mask + p_mask).sum()
            inter = (t_mask * p_mask).sum()
            # union = |A| + |B| - |A ∩ B| = total - inter
            iou = inter / (total - inter)
            pairwise_iou[true_id - 1, pred_id - 1] = iou

    return pairwise_iou, true_id_list, pred_id_list
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def _get_paired_iou(
|
| 266 |
+
pairwise_iou: torch.Tensor, match_iou_threshold: float = 0.5, device: str | torch.device = "cpu"
|
| 267 |
+
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
| 268 |
+
if match_iou_threshold >= 0.5:
|
| 269 |
+
pairwise_iou[pairwise_iou <= match_iou_threshold] = 0.0
|
| 270 |
+
paired_true, paired_pred = torch.nonzero(pairwise_iou)[:, 0], torch.nonzero(pairwise_iou)[:, 1]
|
| 271 |
+
paired_iou = pairwise_iou[paired_true, paired_pred]
|
| 272 |
+
paired_true += 1
|
| 273 |
+
paired_pred += 1
|
| 274 |
+
|
| 275 |
+
return paired_iou, paired_true, paired_pred
|
| 276 |
+
|
| 277 |
+
pairwise_iou = pairwise_iou.cpu().numpy()
|
| 278 |
+
paired_true, paired_pred = linear_sum_assignment(-pairwise_iou)
|
| 279 |
+
paired_iou = pairwise_iou[paired_true, paired_pred]
|
| 280 |
+
paired_true = torch.as_tensor(list(paired_true[paired_iou > match_iou_threshold] + 1), device=device)
|
| 281 |
+
paired_pred = torch.as_tensor(list(paired_pred[paired_iou > match_iou_threshold] + 1), device=device)
|
| 282 |
+
paired_iou = paired_iou[paired_iou > match_iou_threshold]
|
| 283 |
+
|
| 284 |
+
return paired_iou, paired_true, paired_pred
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def _check_panoptic_metric_name(metric_name: str) -> str:
|
| 288 |
+
metric_name = metric_name.replace(" ", "_")
|
| 289 |
+
metric_name = metric_name.lower()
|
| 290 |
+
if metric_name in ["panoptic_quality", "pq"]:
|
| 291 |
+
return "pq"
|
| 292 |
+
if metric_name in ["segmentation_quality", "sq"]:
|
| 293 |
+
return "sq"
|
| 294 |
+
if metric_name in ["recognition_quality", "rq"]:
|
| 295 |
+
return "rq"
|
| 296 |
+
raise ValueError(f"metric name: {metric_name} is wrong, please use 'pq', 'sq' or 'rq'.")
|
source_code/SegMamba/monai/metrics/regression.py
ADDED
|
@@ -0,0 +1,596 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
import math
|
| 15 |
+
from abc import abstractmethod
|
| 16 |
+
from collections.abc import Callable, Sequence
|
| 17 |
+
from functools import partial
|
| 18 |
+
from typing import Any
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn.functional as F
|
| 22 |
+
|
| 23 |
+
from monai.metrics.utils import do_metric_reduction
|
| 24 |
+
from monai.utils import MetricReduction, StrEnum, convert_data_type, ensure_tuple_rep
|
| 25 |
+
from monai.utils.type_conversion import convert_to_dst_type
|
| 26 |
+
|
| 27 |
+
from .metric import CumulativeIterationMetric
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class RegressionMetric(CumulativeIterationMetric):
    """
    Shared base class for the regression metrics in this module.

    A prediction tensor `y_pred` (the output of a regression model) is scored against a
    ground-truth tensor `y`; both are real-valued, and may be given as a list of
    channel-first Tensors (CHW[D]) or as a batch-first Tensor (BCHW[D]).

    Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
            Here `not_nans` count the number of not nans for the metric, thus its shape equals to the shape of the metric.

    """

    def __init__(self, reduction: MetricReduction | str = MetricReduction.MEAN, get_not_nans: bool = False) -> None:
        super().__init__()
        self.reduction = reduction
        self.get_not_nans = get_not_nans

    def aggregate(
        self, reduction: MetricReduction | str | None = None
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """
        Reduce the buffered per-iteration values.

        Args:
            reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
                available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                ``"mean_channel"``, ``"sum_channel"``}, default to `self.reduction`. if "none", will not do reduction.
        """
        data = self.get_buffer()
        if not isinstance(data, torch.Tensor):
            raise ValueError("the data to aggregate must be PyTorch Tensor.")

        f, not_nans = do_metric_reduction(data, reduction or self.reduction)
        return (f, not_nans) if self.get_not_nans else f

    def _check_shape(self, y_pred: torch.Tensor, y: torch.Tensor) -> None:
        # shapes must agree exactly — no broadcasting
        if y_pred.shape != y.shape:
            raise ValueError(f"y_pred and y shapes dont match, received y_pred: [{y_pred.shape}] and y: [{y.shape}]")

        # at least one non-batch dimension is required, i.e. num_dims >= 2
        if len(y_pred.shape) < 2:
            raise ValueError("either channel or spatial dimensions required, found only batch dimension")

    @abstractmethod
    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # subclasses supply the actual element-wise metric
        raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")

    def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:  # type: ignore[override]
        if not isinstance(y_pred, torch.Tensor) or not isinstance(y, torch.Tensor):
            raise ValueError("y_pred and y must be PyTorch Tensor.")
        self._check_shape(y_pred, y)
        return self._compute_metric(y_pred, y)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class MSEMetric(RegressionMetric):
    r"""Mean Squared Error metric:

    .. math::
        \operatorname {MSE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i} \right)^{2}.

    More info: https://en.wikipedia.org/wiki/Mean_squared_error

    The prediction `y_pred` (a regression model output) is scored against the ground truth `y`;
    both are expected to be real-valued.

    Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).

    """

    def __init__(self, reduction: MetricReduction | str = MetricReduction.MEAN, get_not_nans: bool = False) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        # element-wise squaring applied to the prediction/target differences
        self.sq_func = partial(torch.pow, exponent=2.0)

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return compute_mean_error_metrics(y_pred, y, func=self.sq_func)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class MAEMetric(RegressionMetric):
    r"""Compute Mean Absolute Error between two tensors using function:

    .. math::
        \operatorname {MAE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left|y_i-\hat{y_i}\right|.

    More info: https://en.wikipedia.org/wiki/Mean_absolute_error

    Input `y_pred` is compared with ground truth `y`; both are expected to be
    real-valued, where `y_pred` is the output of a regression model.

    Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
    """

    def __init__(self, reduction: MetricReduction | str = MetricReduction.MEAN, get_not_nans: bool = False) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        # element-wise absolute value, applied to the error tensor in _compute_metric
        self.abs_func = torch.abs

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # per-batch-item mean of absolute errors; batch reduction happens in the caller
        return compute_mean_error_metrics(y_pred=y_pred, y=y, func=self.abs_func)
class RMSEMetric(RegressionMetric):
    r"""Compute Root Mean Squared Error between two tensors using function:

    .. math::
        \operatorname {RMSE}\left(Y, \hat{Y}\right) ={ \sqrt{ \frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i}\right)^2 } } \
        = \sqrt {\operatorname{MSE}\left(Y, \hat{Y}\right)}.

    More info: https://en.wikipedia.org/wiki/Root-mean-square_deviation

    Input `y_pred` is compared with ground truth `y`; both are expected to be
    real-valued, where `y_pred` is the output of a regression model.

    Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
    """

    def __init__(self, reduction: MetricReduction | str = MetricReduction.MEAN, get_not_nans: bool = False) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        # element-wise squaring, applied to the error tensor in _compute_metric
        self.sq_func = partial(torch.pow, exponent=2.0)

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # RMSE is the square root of the per-batch-item MSE
        return compute_mean_error_metrics(y_pred=y_pred, y=y, func=self.sq_func).sqrt()
class PSNRMetric(RegressionMetric):
    r"""Compute Peak Signal To Noise Ratio between two tensors using function:

    .. math::
        \operatorname{PSNR}\left(Y, \hat{Y}\right) = 20 \cdot \log_{10} \left({\mathit{MAX}}_Y\right) \
        -10 \cdot \log_{10}\left(\operatorname{MSE\left(Y, \hat{Y}\right)}\right)

    More info: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    Help taken from:
    https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py line 4139

    Input `y_pred` is compared with ground truth `y`; both are expected to be
    real-valued, where `y_pred` is the output of a regression model.

    Example of the typical execution steps of this metric class follows :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        max_val: The dynamic range of the images/volumes (i.e., the difference between the
            maximum and the minimum allowed values e.g. 255 for a uint8 image).
        reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction.
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans).
    """

    def __init__(
        self, max_val: int | float, reduction: MetricReduction | str = MetricReduction.MEAN, get_not_nans: bool = False
    ) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)
        self.max_val = max_val
        # element-wise squaring, applied to the error tensor in _compute_metric
        self.sq_func = partial(torch.pow, exponent=2.0)

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> Any:
        mse = compute_mean_error_metrics(y_pred=y_pred, y=y, func=self.sq_func)
        # PSNR = 20*log10(MAX) - 10*log10(MSE), computed per batch item
        return 20 * math.log10(self.max_val) - 10 * torch.log10(mse)
def compute_mean_error_metrics(y_pred: torch.Tensor, y: torch.Tensor, func: Callable) -> torch.Tensor:
    """Apply ``func`` to the error ``y - y_pred`` and average over all non-batch dims.

    Reduction happens only over channel + spatial dimensions here; reduction across
    the batch is performed later by the calling metric class (do_metric_reduction).

    Returns:
        a column tensor of shape (batch, 1), one mean-error value per batch item.
    """
    per_element = func(y - y_pred)
    flattened = per_element.flatten(start_dim=1)
    return flattened.mean(dim=-1, keepdim=True)
class KernelType(StrEnum):
    # Smoothing-kernel choices for SSIM/MS-SSIM computation:
    # GAUSSIAN uses a separable Gaussian window; UNIFORM uses a constant box window.
    GAUSSIAN = "gaussian"
    UNIFORM = "uniform"
class SSIMMetric(RegressionMetric):
    r"""
    Computes the Structural Similarity Index Measure (SSIM).

    .. math::
        \operatorname {SSIM}(x,y) =\frac {(2 \mu_x \mu_y + c_1)(2 \sigma_{xy} + c_2)}{(\mu_x^2 + \
        \mu_y^2 + c_1)(\sigma_x^2 + \sigma_y^2 + c_2)}

    For more info, visit
        https://vicuesoft.com/glossary/term/ssim-ms-ssim/

    SSIM reference paper:
        Wang, Zhou, et al. "Image quality assessment: from error visibility to structural
        similarity." IEEE transactions on image processing 13.4 (2004): 600-612.

    Args:
        spatial_dims: number of spatial dimensions of the input images.
        data_range: value range of input images. (usually 1.0 or 255)
        kernel_type: type of kernel, can be "gaussian" or "uniform".
        win_size: window size of kernel
        kernel_sigma: standard deviation for Gaussian kernel.
        k1: stability constant used in the luminance denominator
        k2: stability constant used in the contrast denominator
        reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans)
    """

    def __init__(
        self,
        spatial_dims: int,
        data_range: float = 1.0,
        kernel_type: KernelType | str = KernelType.GAUSSIAN,
        win_size: int | Sequence[int] = 11,
        kernel_sigma: float | Sequence[float] = 1.5,
        k1: float = 0.01,
        k2: float = 0.03,
        reduction: MetricReduction | str = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)

        self.spatial_dims = spatial_dims
        self.data_range = data_range
        self.kernel_type = kernel_type

        # expand scalar window parameters to one value per spatial dimension
        self.kernel_size = win_size if isinstance(win_size, Sequence) else ensure_tuple_rep(win_size, spatial_dims)
        self.kernel_sigma = (
            kernel_sigma if isinstance(kernel_sigma, Sequence) else ensure_tuple_rep(kernel_sigma, spatial_dims)
        )

        self.k1 = k1
        self.k2 = k2

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """
        Args:
            y_pred: Predicted image.
                It must be a 2D or 3D batch-first tensor [B,C,H,W] or [B,C,H,W,D].
            y: Reference image.
                It must be a 2D or 3D batch-first tensor [B,C,H,W] or [B,C,H,W,D].

        Raises:
            ValueError: when `y_pred` is not a 2D or 3D image.
        """
        dims = y_pred.ndimension()
        if self.spatial_dims == 2 and dims != 4:
            raise ValueError(
                f"y_pred should have 4 dimensions (batch, channel, height, width) when using {self.spatial_dims} "
                f"spatial dimensions, got {dims}."
            )

        if self.spatial_dims == 3 and dims != 5:
            raise ValueError(
                f"y_pred should have 5 dimensions (batch, channel, height, width, depth) when using {self.spatial_dims}"
                f" spatial dimensions, got {dims}."
            )

        # the second return value (contrast sensitivity) is only needed for MS-SSIM
        full_ssim, _ = compute_ssim_and_cs(
            y_pred=y_pred,
            y=y,
            spatial_dims=self.spatial_dims,
            data_range=self.data_range,
            kernel_type=self.kernel_type,
            kernel_size=self.kernel_size,
            kernel_sigma=self.kernel_sigma,
            k1=self.k1,
            k2=self.k2,
        )

        # average the SSIM map over channel + spatial dims; keep one value per batch item
        batch = full_ssim.shape[0]
        ssim_per_batch: torch.Tensor = full_ssim.view(batch, -1).mean(1, keepdim=True)

        return ssim_per_batch
def _gaussian_kernel(
|
| 330 |
+
spatial_dims: int, num_channels: int, kernel_size: Sequence[int], kernel_sigma: Sequence[float]
|
| 331 |
+
) -> torch.Tensor:
|
| 332 |
+
"""Computes 2D or 3D gaussian kernel.
|
| 333 |
+
|
| 334 |
+
Args:
|
| 335 |
+
spatial_dims: number of spatial dimensions of the input images.
|
| 336 |
+
num_channels: number of channels in the image
|
| 337 |
+
kernel_size: size of kernel
|
| 338 |
+
kernel_sigma: standard deviation for Gaussian kernel.
|
| 339 |
+
"""
|
| 340 |
+
|
| 341 |
+
def gaussian_1d(kernel_size: int, sigma: float) -> torch.Tensor:
|
| 342 |
+
"""Computes 1D gaussian kernel.
|
| 343 |
+
|
| 344 |
+
Args:
|
| 345 |
+
kernel_size: size of the gaussian kernel
|
| 346 |
+
sigma: Standard deviation of the gaussian kernel
|
| 347 |
+
"""
|
| 348 |
+
dist = torch.arange(start=(1 - kernel_size) / 2, end=(1 + kernel_size) / 2, step=1)
|
| 349 |
+
gauss = torch.exp(-torch.pow(dist / sigma, 2) / 2)
|
| 350 |
+
return (gauss / gauss.sum()).unsqueeze(dim=0)
|
| 351 |
+
|
| 352 |
+
gaussian_kernel_x = gaussian_1d(kernel_size[0], kernel_sigma[0])
|
| 353 |
+
gaussian_kernel_y = gaussian_1d(kernel_size[1], kernel_sigma[1])
|
| 354 |
+
kernel = torch.matmul(gaussian_kernel_x.t(), gaussian_kernel_y) # (kernel_size, 1) * (1, kernel_size)
|
| 355 |
+
|
| 356 |
+
kernel_dimensions: tuple[int, ...] = (num_channels, 1, kernel_size[0], kernel_size[1])
|
| 357 |
+
|
| 358 |
+
if spatial_dims == 3:
|
| 359 |
+
gaussian_kernel_z = gaussian_1d(kernel_size[2], kernel_sigma[2])[None,]
|
| 360 |
+
kernel = torch.mul(
|
| 361 |
+
kernel.unsqueeze(-1).repeat(1, 1, kernel_size[2]),
|
| 362 |
+
gaussian_kernel_z.expand(kernel_size[0], kernel_size[1], kernel_size[2]),
|
| 363 |
+
)
|
| 364 |
+
kernel_dimensions = (num_channels, 1, kernel_size[0], kernel_size[1], kernel_size[2])
|
| 365 |
+
|
| 366 |
+
return kernel.expand(kernel_dimensions)
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def compute_ssim_and_cs(
    y_pred: torch.Tensor,
    y: torch.Tensor,
    spatial_dims: int,
    kernel_size: Sequence[int],
    kernel_sigma: Sequence[float],
    data_range: float = 1.0,
    kernel_type: KernelType | str = KernelType.GAUSSIAN,
    k1: float = 0.01,
    k2: float = 0.03,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Function to compute the Structural Similarity Index Measure (SSIM) and Contrast Sensitivity (CS) for a batch
    of images.

    Args:
        y_pred: batch of predicted images with shape (batch_size, channels, spatial_dim1, spatial_dim2[, spatial_dim3])
        y: batch of target images with shape (batch_size, channels, spatial_dim1, spatial_dim2[, spatial_dim3])
        kernel_size: the size of the kernel to use for the SSIM computation.
        kernel_sigma: the standard deviation of the kernel to use for the SSIM computation.
        spatial_dims: number of spatial dimensions of the images (2, 3)
        data_range: the data range of the images.
        kernel_type: the type of kernel to use for the SSIM computation. Can be either "gaussian" or "uniform".
        k1: the first stability constant.
        k2: the second stability constant.

    Returns:
        ssim: the Structural Similarity Index Measure score for the batch of images.
        cs: the Contrast Sensitivity for the batch of images.

    Raises:
        ValueError: when the input shapes differ, or when ``kernel_type`` is not a
            supported kernel type.
    """
    if y.shape != y_pred.shape:
        raise ValueError(f"y_pred and y should have same shapes, got {y_pred.shape} and {y.shape}.")

    y_pred = convert_data_type(y_pred, output_type=torch.Tensor, dtype=torch.float)[0]
    y = convert_data_type(y, output_type=torch.Tensor, dtype=torch.float)[0]

    num_channels = y_pred.size(1)

    if kernel_type == KernelType.GAUSSIAN:
        kernel = _gaussian_kernel(spatial_dims, num_channels, kernel_size, kernel_sigma)
    elif kernel_type == KernelType.UNIFORM:
        kernel = torch.ones((num_channels, 1, *kernel_size)) / torch.prod(torch.tensor(kernel_size))
    else:
        # previously fell through with `kernel` unbound, raising an opaque UnboundLocalError
        raise ValueError(f"Unsupported kernel_type: {kernel_type}. Available options are 'gaussian' and 'uniform'.")

    kernel = convert_to_dst_type(src=kernel, dst=y_pred)[0]

    c1 = (k1 * data_range) ** 2  # stability constant for luminance
    c2 = (k2 * data_range) ** 2  # stability constant for contrast

    # depthwise (grouped) convolution computes local windowed means per channel
    conv_fn = getattr(F, f"conv{spatial_dims}d")
    mu_x = conv_fn(y_pred, kernel, groups=num_channels)
    mu_y = conv_fn(y, kernel, groups=num_channels)
    mu_xx = conv_fn(y_pred * y_pred, kernel, groups=num_channels)
    mu_yy = conv_fn(y * y, kernel, groups=num_channels)
    mu_xy = conv_fn(y_pred * y, kernel, groups=num_channels)

    # local (co)variances via E[XY] - E[X]E[Y]
    sigma_x = mu_xx - mu_x * mu_x
    sigma_y = mu_yy - mu_y * mu_y
    sigma_xy = mu_xy - mu_x * mu_y

    contrast_sensitivity = (2 * sigma_xy + c2) / (sigma_x + sigma_y + c2)
    ssim_value_full_image = ((2 * mu_x * mu_y + c1) / (mu_x**2 + mu_y**2 + c1)) * contrast_sensitivity

    return ssim_value_full_image, contrast_sensitivity
class MultiScaleSSIMMetric(RegressionMetric):
    """
    Computes the Multi-Scale Structural Similarity Index Measure (MS-SSIM).

    MS-SSIM reference paper:
        Wang, Z., Simoncelli, E.P. and Bovik, A.C., 2003, November. "Multiscale structural
        similarity for image quality assessment." In The Thirty-Seventh Asilomar Conference
        on Signals, Systems & Computers, 2003 (Vol. 2, pp. 1398-1402). IEEE

    Args:
        spatial_dims: number of spatial dimensions of the input images.
        data_range: value range of input images. (usually 1.0 or 255)
        kernel_type: type of kernel, can be "gaussian" or "uniform".
        kernel_size: size of kernel
        kernel_sigma: standard deviation for Gaussian kernel.
        k1: stability constant used in the luminance denominator
        k2: stability constant used in the contrast denominator
        weights: parameters for image similarity and contrast sensitivity at different resolution scores.
        reduction: define the mode to reduce metrics, will only execute reduction on `not-nan` values,
            available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
            ``"mean_channel"``, ``"sum_channel"``}, default to ``"mean"``. if "none", will not do reduction
        get_not_nans: whether to return the `not_nans` count, if True, aggregate() returns (metric, not_nans)
    """

    def __init__(
        self,
        spatial_dims: int,
        data_range: float = 1.0,
        kernel_type: KernelType | str = KernelType.GAUSSIAN,
        kernel_size: int | Sequence[int] = 11,
        kernel_sigma: float | Sequence[float] = 1.5,
        k1: float = 0.01,
        k2: float = 0.03,
        weights: Sequence[float] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
        reduction: MetricReduction | str = MetricReduction.MEAN,
        get_not_nans: bool = False,
    ) -> None:
        super().__init__(reduction=reduction, get_not_nans=get_not_nans)

        self.spatial_dims = spatial_dims
        self.data_range = data_range
        self.kernel_type = kernel_type

        # expand scalar kernel parameters to one value per spatial dimension
        self.kernel_size = (
            kernel_size if isinstance(kernel_size, Sequence) else ensure_tuple_rep(kernel_size, spatial_dims)
        )
        self.kernel_sigma = (
            kernel_sigma if isinstance(kernel_sigma, Sequence) else ensure_tuple_rep(kernel_sigma, spatial_dims)
        )

        self.k1 = k1
        self.k2 = k2
        self.weights = weights

    def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # input validation and the per-scale loop are handled by compute_ms_ssim
        return compute_ms_ssim(
            y_pred=y_pred,
            y=y,
            spatial_dims=self.spatial_dims,
            data_range=self.data_range,
            kernel_type=self.kernel_type,
            kernel_size=self.kernel_size,
            kernel_sigma=self.kernel_sigma,
            k1=self.k1,
            k2=self.k2,
            weights=self.weights,
        )
def compute_ms_ssim(
    y_pred: torch.Tensor,
    y: torch.Tensor,
    spatial_dims: int,
    data_range: float = 1.0,
    kernel_type: KernelType | str = KernelType.GAUSSIAN,
    kernel_size: int | Sequence[int] = 11,
    kernel_sigma: float | Sequence[float] = 1.5,
    k1: float = 0.01,
    k2: float = 0.03,
    weights: Sequence[float] = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
) -> torch.Tensor:
    """
    Compute the Multi-Scale SSIM between two batches of images.

    At each scale the contrast sensitivity (CS) is accumulated and the images are
    downsampled by a factor of two; the final scale contributes its full SSIM value.
    The per-scale terms are combined as a weighted geometric mean.

    Args:
        y_pred: Predicted image.
            It must be a 2D or 3D batch-first tensor [B,C,H,W] or [B,C,H,W,D].
        y: Reference image.
            It must be a 2D or 3D batch-first tensor [B,C,H,W] or [B,C,H,W,D].
        spatial_dims: number of spatial dimensions of the input images.
        data_range: value range of input images. (usually 1.0 or 255)
        kernel_type: type of kernel, can be "gaussian" or "uniform".
        kernel_size: size of kernel
        kernel_sigma: standard deviation for Gaussian kernel.
        k1: stability constant used in the luminance denominator
        k2: stability constant used in the contrast denominator
        weights: parameters for image similarity and contrast sensitivity at different resolution scores.

    Returns:
        a (batch, 1) tensor with one MS-SSIM value per batch item.

    Raises:
        ValueError: when `y_pred` is not a 2D or 3D image, or when the image is too
            small for the number of scales and the kernel size.
    """
    dims = y_pred.ndimension()
    if spatial_dims == 2 and dims != 4:
        raise ValueError(
            f"y_pred should have 4 dimensions (batch, channel, height, width) when using {spatial_dims} "
            f"spatial dimensions, got {dims}."
        )

    if spatial_dims == 3 and dims != 5:
        # fixed message: a batch-first 3D image has 5 dimensions, not 4
        raise ValueError(
            f"y_pred should have 5 dimensions (batch, channel, height, width, depth) when using {spatial_dims}"
            f" spatial dimensions, got {dims}."
        )

    if not isinstance(kernel_size, Sequence):
        kernel_size = ensure_tuple_rep(kernel_size, spatial_dims)

    if not isinstance(kernel_sigma, Sequence):
        kernel_sigma = ensure_tuple_rep(kernel_sigma, spatial_dims)

    # check if image have enough size for the number of downsamplings and the size of the kernel
    # NOTE(review): for the default 5 weights this equals 2**(len(weights)-1) = 16, matching the
    # total downsampling factor; confirm the intended formula for other weight counts.
    weights_div = max(1, (len(weights) - 1)) ** 2
    y_pred_spatial_dims = y_pred.shape[2:]
    for i in range(len(y_pred_spatial_dims)):
        if y_pred_spatial_dims[i] // weights_div <= kernel_size[i] - 1:
            # fixed message: report the offending spatial dimension instead of always "height"
            raise ValueError(
                f"For a given number of `weights` parameters {len(weights)} and kernel size "
                f"{kernel_size[i]}, the image size along spatial dimension {i} must be larger than "
                f"{(kernel_size[i] - 1) * weights_div}."
            )

    weights_tensor = torch.tensor(weights, device=y_pred.device, dtype=torch.float)

    avg_pool = getattr(F, f"avg_pool{spatial_dims}d")

    multiscale_list: list[torch.Tensor] = []
    for _ in range(len(weights_tensor)):
        ssim, cs = compute_ssim_and_cs(
            y_pred=y_pred,
            y=y,
            spatial_dims=spatial_dims,
            data_range=data_range,
            kernel_type=kernel_type,
            kernel_size=kernel_size,
            kernel_sigma=kernel_sigma,
            k1=k1,
            k2=k2,
        )

        # contrast sensitivity term for this scale, one value per batch item
        cs_per_batch = cs.view(cs.shape[0], -1).mean(1)

        multiscale_list.append(torch.relu(cs_per_batch))
        # halve the resolution for the next scale
        y_pred = avg_pool(y_pred, kernel_size=2)
        y = avg_pool(y, kernel_size=2)

    # the coarsest scale contributes its full SSIM (luminance included), not just CS
    ssim = ssim.view(ssim.shape[0], -1).mean(1)
    multiscale_list[-1] = torch.relu(ssim)
    multiscale_list_tensor = torch.stack(multiscale_list)

    # weighted geometric mean across scales
    ms_ssim_value_full_image = torch.prod(multiscale_list_tensor ** weights_tensor.view(-1, 1), dim=0)

    ms_ssim_per_batch: torch.Tensor = ms_ssim_value_full_image.view(ms_ssim_value_full_image.shape[0], -1).mean(
        1, keepdim=True
    )

    return ms_ssim_per_batch
source_code/SegMamba/monai/networks/__init__.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) MONAI Consortium
|
| 2 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 3 |
+
# you may not use this file except in compliance with the License.
|
| 4 |
+
# You may obtain a copy of the License at
|
| 5 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 6 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 7 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 8 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 9 |
+
# See the License for the specific language governing permissions and
|
| 10 |
+
# limitations under the License.
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from .utils import (
|
| 15 |
+
convert_to_onnx,
|
| 16 |
+
convert_to_torchscript,
|
| 17 |
+
convert_to_trt,
|
| 18 |
+
copy_model_state,
|
| 19 |
+
eval_mode,
|
| 20 |
+
get_state_dict,
|
| 21 |
+
icnr_init,
|
| 22 |
+
look_up_named_module,
|
| 23 |
+
normal_init,
|
| 24 |
+
normalize_transform,
|
| 25 |
+
one_hot,
|
| 26 |
+
pixelshuffle,
|
| 27 |
+
predict_segmentation,
|
| 28 |
+
replace_modules,
|
| 29 |
+
replace_modules_temp,
|
| 30 |
+
save_state,
|
| 31 |
+
set_named_module,
|
| 32 |
+
to_norm_affine,
|
| 33 |
+
train_mode,
|
| 34 |
+
)
|
source_code/sam3/.github/workflows/format.yml
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: SAM3/ufmt
|
| 2 |
+
on:
|
| 3 |
+
pull_request:
|
| 4 |
+
branches:
|
| 5 |
+
- main
|
| 6 |
+
jobs:
|
| 7 |
+
ufmt_check:
|
| 8 |
+
runs-on: ubuntu-latest
|
| 9 |
+
steps:
|
| 10 |
+
- name: Install ruff-api
|
| 11 |
+
run: pip install ruff-api==0.1.0
|
| 12 |
+
- name: Check formatting
|
| 13 |
+
uses: omnilib/ufmt@action-v1
|
| 14 |
+
with:
|
| 15 |
+
path: sam3 scripts
|
| 16 |
+
python-version: "3.12"
|
| 17 |
+
black-version: "24.2.0"
|
| 18 |
+
usort-version: "1.0.2"
|
source_code/sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_gt.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
source_code/sam3/sam3/agent/helpers/boxes.py
ADDED
|
@@ -0,0 +1,438 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
from enum import IntEnum, unique
|
| 5 |
+
from typing import List, Tuple, Union
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
from torch import device
|
| 10 |
+
|
| 11 |
+
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@unique
class BoxMode(IntEnum):
    """
    Enum of different ways to represent a box.
    """

    XYXY_ABS = 0
    """
    (x0, y0, x1, y1) in absolute floating points coordinates.
    The coordinates in range [0, width or height].
    """
    XYWH_ABS = 1
    """
    (x0, y0, w, h) in absolute floating points coordinates.
    """
    XYXY_REL = 2
    """
    Not yet supported!
    (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
    """
    XYWH_REL = 3
    """
    Not yet supported!
    (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
    """
    XYWHA_ABS = 4
    """
    (xc, yc, w, h, a) in absolute floating points coordinates.
    (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
    """

    @staticmethod
    def convert(
        box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode"
    ) -> _RawBoxType:
        """
        Convert boxes between representations; the result has the same container
        type as the input (list/tuple/ndarray/Tensor).

        Args:
            box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
            from_mode, to_mode (BoxMode)

        Returns:
            The converted box of the same type.

        Raises:
            NotImplementedError: for unsupported mode pairs.
        """
        if from_mode == to_mode:
            return box

        # remember input container type so it can be restored on return
        original_type = type(box)
        is_numpy = isinstance(box, np.ndarray)
        single_box = isinstance(box, (list, tuple))
        if single_box:
            assert len(box) == 4 or len(box) == 5, (
                "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
                " where k == 4 or 5"
            )
            # promote the single box to a 1xK tensor for uniform processing
            arr = torch.tensor(box)[None, :]
        else:
            # avoid modifying the input box
            if is_numpy:
                arr = torch.from_numpy(np.asarray(box)).clone()
            else:
                arr = box.clone()

        assert to_mode not in [
            BoxMode.XYXY_REL,
            BoxMode.XYWH_REL,
        ] and from_mode not in [
            BoxMode.XYXY_REL,
            BoxMode.XYWH_REL,
        ], "Relative mode not yet supported!"

        if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
            # rotated box -> axis-aligned bounding box of the rotated rectangle
            assert (
                arr.shape[-1] == 5
            ), "The last dimension of input shape must be 5 for XYWHA format"
            # compute in double precision, restore the input dtype afterwards
            original_dtype = arr.dtype
            arr = arr.double()

            w = arr[:, 2]
            h = arr[:, 3]
            a = arr[:, 4]
            c = torch.abs(torch.cos(a * math.pi / 180.0))
            s = torch.abs(torch.sin(a * math.pi / 180.0))
            # This basically computes the horizontal bounding rectangle of the rotated box
            new_w = c * w + s * h
            new_h = c * h + s * w

            # convert center to top-left corner
            arr[:, 0] -= new_w / 2.0
            arr[:, 1] -= new_h / 2.0
            # bottom-right corner
            arr[:, 2] = arr[:, 0] + new_w
            arr[:, 3] = arr[:, 1] + new_h

            arr = arr[:, :4].to(dtype=original_dtype)
        elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
            # axis-aligned box -> rotated representation with zero angle
            original_dtype = arr.dtype
            arr = arr.double()
            # top-left corner -> center
            arr[:, 0] += arr[:, 2] / 2.0
            arr[:, 1] += arr[:, 3] / 2.0
            angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
            arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
        else:
            # XYXY <-> XYWH: corner/size arithmetic done in place
            if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
                arr[:, 2] += arr[:, 0]
                arr[:, 3] += arr[:, 1]
            elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
                arr[:, 2] -= arr[:, 0]
                arr[:, 3] -= arr[:, 1]
            else:
                raise NotImplementedError(
                    "Conversion from BoxMode {} to {} is not supported yet".format(
                        from_mode, to_mode
                    )
                )

        # restore the caller's container type
        if single_box:
            return original_type(arr.flatten().tolist())
        if is_numpy:
            return arr.numpy()
        else:
            return arr
class Boxes:
    """
    Holds N axis-aligned boxes as a single Nx4 float32 tensor, one box per
    row in (x1, y1, x2, y2) order.

    Provides common box operations (`area`, `clip`, `nonempty`, ...) and
    tensor-like behaviour: indexing, `.to(device)`, `.device`, and iteration
    over the individual boxes.

    Attributes:
        tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).
    """

    def __init__(self, tensor: torch.Tensor):
        """
        Args:
            tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
        """
        if isinstance(tensor, torch.Tensor):
            tensor = tensor.to(torch.float32)
        else:
            tensor = torch.as_tensor(
                tensor, dtype=torch.float32, device=torch.device("cpu")
            )
        if tensor.numel() == 0:
            # Reshape the (empty) input rather than allocating a fresh tensor,
            # so the result still depends on the input (keeps jit tracing happy).
            tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)
        assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()

        self.tensor = tensor

    def clone(self) -> "Boxes":
        """
        Returns:
            Boxes: a copy whose tensor shares no storage with this one.
        """
        return Boxes(self.tensor.clone())

    def to(self, device: torch.device):
        # Boxes are assumed float32 and does not support to(dtype)
        return Boxes(self.tensor.to(device=device))

    def area(self) -> torch.Tensor:
        """
        Returns:
            torch.Tensor: vector of length N holding the area of each box.
        """
        b = self.tensor
        return (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])

    def clip(self, box_size: Tuple[int, int]) -> None:
        """
        Clip the boxes in place: x coordinates are limited to [0, width]
        and y coordinates to [0, height].

        Args:
            box_size (height, width): The clipping box's size.
        """
        assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
        h, w = box_size
        x1, y1, x2, y2 = self.tensor.unbind(dim=-1)
        self.tensor = torch.stack(
            (
                x1.clamp(min=0, max=w),
                y1.clamp(min=0, max=h),
                x2.clamp(min=0, max=w),
                y2.clamp(min=0, max=h),
            ),
            dim=-1,
        )

    def nonempty(self, threshold: float = 0.0) -> torch.Tensor:
        """
        Find boxes that are non-empty. A box counts as empty when either of
        its sides is no larger than ``threshold``.

        Returns:
            Tensor: bool vector, True for every non-empty box.
        """
        b = self.tensor
        return ((b[:, 2] - b[:, 0]) > threshold) & ((b[:, 3] - b[:, 1]) > threshold)

    def __getitem__(self, item) -> "Boxes":
        """
        Create a new :class:`Boxes` by indexing.

        Args:
            item: int, slice, or a BoolTensor

        Supported forms:

        1. `boxes[3]`: a `Boxes` containing exactly one box.
        2. `boxes[2:10]`: a slice of boxes.
        3. `boxes[vector]` with a BoolTensor of `length = len(boxes)`:
           nonzero elements are selected.

        The returned Boxes may share storage with this Boxes, subject to
        Pytorch's indexing semantics.
        """
        if isinstance(item, int):
            return Boxes(self.tensor[item].view(1, -1))
        selected = self.tensor[item]
        assert (
            selected.dim() == 2
        ), "Indexing on Boxes with {} failed to return a matrix!".format(item)
        return Boxes(selected)

    def __len__(self) -> int:
        return self.tensor.shape[0]

    def __repr__(self) -> str:
        return "Boxes(" + str(self.tensor) + ")"

    def inside_box(
        self, box_size: Tuple[int, int], boundary_threshold: int = 0
    ) -> torch.Tensor:
        """
        Args:
            box_size (height, width): Size of the reference box.
            boundary_threshold (int): boxes that extend beyond the reference
                box boundary by more than this value count as "outside".

        Returns:
            a bool vector, True for every box inside the reference box.
        """
        height, width = box_size
        x1, y1, x2, y2 = self.tensor.unbind(dim=-1)
        return (
            (x1 >= -boundary_threshold)
            & (y1 >= -boundary_threshold)
            & (x2 < width + boundary_threshold)
            & (y2 < height + boundary_threshold)
        )

    def get_centers(self) -> torch.Tensor:
        """
        Returns:
            The box centers as an Nx2 array of (x, y).
        """
        return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2

    def scale(self, scale_x: float, scale_y: float) -> None:
        """Scale the boxes in place by per-axis factors."""
        self.tensor[:, 0::2] *= scale_x
        self.tensor[:, 1::2] *= scale_y

    @classmethod
    def cat(cls, boxes_list: List["Boxes"]) -> "Boxes":
        """
        Concatenate a list of Boxes into a single Boxes.

        Arguments:
            boxes_list (list[Boxes])

        Returns:
            Boxes: the concatenated Boxes
        """
        assert isinstance(boxes_list, (list, tuple))
        if not boxes_list:
            return cls(torch.empty(0))
        assert all(isinstance(box, Boxes) for box in boxes_list)

        # torch.cat guarantees the result never shares storage with the inputs
        return cls(torch.cat([b.tensor for b in boxes_list], dim=0))

    @property
    def device(self) -> device:
        return self.tensor.device

    # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
    # https://github.com/pytorch/pytorch/issues/18627
    @torch.jit.unused
    def __iter__(self):
        """Yield one box at a time as a Tensor of shape (4,)."""
        yield from self.tensor
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
    """
    Compute the intersection area for every pair drawn from two box
    collections of sizes N and M. Boxes are (xmin, ymin, xmax, ymax).

    Args:
        boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.

    Returns:
        Tensor: intersection areas, sized [N,M].
    """
    b1, b2 = boxes1.tensor, boxes2.tensor
    # Broadcast to [N,M,2]: max of the top-left corners, min of the bottom-right.
    lt = torch.max(b1[:, None, :2], b2[:, :2])
    rb = torch.min(b1[:, None, 2:], b2[:, 2:])
    # Negative extents mean no overlap; clamp them to zero before multiplying.
    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    return wh.prod(dim=2)  # [N,M]
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
|
| 348 |
+
# with slight modifications
|
| 349 |
+
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
    """
    Compute the IoU (intersection over union) between **all** N x M pairs
    drawn from two box collections. Boxes are (xmin, ymin, xmax, ymax).

    Args:
        boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.

    Returns:
        Tensor: IoU, sized [N,M].
    """
    area1 = boxes1.area()  # [N]
    area2 = boxes2.area()  # [M]
    inter = pairwise_intersection(boxes1, boxes2)

    union = area1[:, None] + area2 - inter
    zero = torch.zeros(1, dtype=inter.dtype, device=inter.device)
    # Pairs with no overlap (including empty boxes) get IoU 0 instead of 0/0.
    return torch.where(inter > 0, inter / union, zero)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
    """
    Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area).

    Args:
        boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.

    Returns:
        Tensor: IoA, sized [N,M].
    """
    area2 = boxes2.area()  # [M]
    inter = pairwise_intersection(boxes1, boxes2)

    zero = torch.zeros(1, dtype=inter.dtype, device=inter.device)
    # Pairs with no overlap (including empty boxes) get IoA 0 instead of 0/0.
    return torch.where(inter > 0, inter / area2, zero)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):
    """
    Pairwise distance between N points and M boxes. Each distance is the
    signed offset from the point to the four box edges; all four values are
    positive exactly when the point lies inside the box.

    Args:
        points: Nx2 coordinates. Each row is (x, y)
        boxes: M boxes

    Returns:
        Tensor: distances of size (N, M, 4), ordered as distance to the
        left, top, right, bottom edge of the box.
    """
    px = points[:, None, 0]  # (N, 1)
    py = points[:, None, 1]  # (N, 1)
    b = boxes.tensor
    left = px - b[None, :, 0]
    top = py - b[None, :, 1]
    right = b[None, :, 2] - px
    bottom = b[None, :, 3] - py
    return torch.stack([left, top, right, bottom], dim=2)
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
    """
    Compute pairwise intersection over union (IOU) of two sets of matched
    boxes that have the same number of boxes.
    Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.

    Args:
        boxes1 (Boxes): bounding boxes, sized [N,4].
        boxes2 (Boxes): same length as boxes1
    Returns:
        Tensor: iou, sized [N].
    """
    # Fix: the original adjacent string literals concatenated to
    # "the samenumber of entries" (missing space between them).
    assert len(boxes1) == len(boxes2), (
        "boxlists should have the same number of entries, got {}, {}".format(
            len(boxes1), len(boxes2)
        )
    )
    area1 = boxes1.area()  # [N]
    area2 = boxes2.area()  # [N]
    box1, box2 = boxes1.tensor, boxes2.tensor
    # Elementwise (row-matched) intersection rectangle.
    lt = torch.max(box1[:, :2], box2[:, :2])  # [N,2]
    rb = torch.min(box1[:, 2:], box2[:, 2:])  # [N,2]
    wh = (rb - lt).clamp(min=0)  # [N,2]: zero extent when boxes don't overlap
    inter = wh[:, 0] * wh[:, 1]  # [N]
    iou = inter / (area1 + area2 - inter)  # [N]
    return iou
|
source_code/sam3/sam3/agent/helpers/color_map.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
An awesome colormap for really neat visualizations.
|
| 5 |
+
Copied from Detectron, and removed gray colors.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import random
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
__all__ = ["colormap", "random_color", "random_colors"]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# A palette of 20 bright, fully saturated colors for segmentation masks,
# sampled from the edges of the sRGB cube for maximum intensity (no grays).
_COLORS = np.array(
    [
        [1.000, 1.000, 0.000],  # 1. Yellow
        [0.000, 1.000, 0.000],  # 2. Lime
        [0.000, 1.000, 1.000],  # 3. Cyan
        [1.000, 0.000, 1.000],  # 4. Magenta
        [1.000, 0.000, 0.000],  # 5. Red
        [1.000, 0.498, 0.000],  # 6. Orange
        [0.498, 1.000, 0.000],  # 7. Chartreuse
        [0.000, 1.000, 0.498],  # 8. Spring Green
        [1.000, 0.000, 0.498],  # 9. Rose
        [0.498, 0.000, 1.000],  # 10. Violet
        [0.753, 1.000, 0.000],  # 11. Electric Lime
        [1.000, 0.753, 0.000],  # 12. Vivid Orange
        [0.000, 1.000, 0.753],  # 13. Turquoise
        [0.753, 0.000, 1.000],  # 14. Bright Violet
        [1.000, 0.000, 0.753],  # 15. Bright Pink
        [1.000, 0.251, 0.000],  # 16. Fiery Orange
        [0.251, 1.000, 0.000],  # 17. Bright Chartreuse
        [0.000, 1.000, 0.251],  # 18. Malachite Green
        [0.251, 0.000, 1.000],  # 19. Deep Violet
        [1.000, 0.000, 0.251],  # 20. Hot Pink
    ],
    dtype=np.float32,
)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def colormap(rgb=False, maximum=255):
    """
    Return the full palette, scaled and channel-ordered as requested.

    Args:
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1

    Returns:
        ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
    """
    assert maximum in [255, 1], maximum
    colors = _COLORS * maximum
    # Reversing the channel axis converts RGB -> BGR.
    return colors if rgb else colors[:, ::-1]
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def random_color(rgb=False, maximum=255):
    """
    Pick one palette color uniformly at random.

    Args:
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1

    Returns:
        ndarray: a vector of 3 numbers
    """
    choice = _COLORS[np.random.randint(0, len(_COLORS))] * maximum
    # Reversing the vector converts RGB -> BGR.
    return choice if rgb else choice[::-1]
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def random_colors(N, rgb=False, maximum=255):
    """
    Sample N distinct palette colors (N must not exceed the palette size).

    Args:
        N (int): number of unique colors needed
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1

    Returns:
        ndarray: a list of random_color
    """
    # random.sample guarantees the picked indices are distinct.
    picked = [_COLORS[i] * maximum for i in random.sample(range(len(_COLORS)), N)]
    if rgb:
        return picked
    return [color[::-1] for color in picked]
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
if __name__ == "__main__":
    # Visual sanity check: paint each palette color into one cell of a
    # 10x10 grid of 100px tiles over random noise, then display it.
    import cv2

    size = 100
    H, W = 10, 10
    canvas = np.random.rand(H * size, W * size, 3).astype("float32")
    for cell, color in enumerate(_COLORS):
        row, col = divmod(cell, W)
        canvas[row * size : (row + 1) * size, col * size : (col + 1) * size] = color
    cv2.imshow("a", canvas)
    cv2.waitKey(0)
|
source_code/sam3/sam3/agent/helpers/keypoints.py
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
from typing import Any, List, Tuple, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
from torch.nn import functional as F
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Keypoints:
|
| 11 |
+
"""
|
| 12 |
+
Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property
|
| 13 |
+
containing the x,y location and visibility flag of each keypoint. This tensor has shape
|
| 14 |
+
(N, K, 3) where N is the number of instances and K is the number of keypoints per instance.
|
| 15 |
+
|
| 16 |
+
The visibility flag follows the COCO format and must be one of three integers:
|
| 17 |
+
|
| 18 |
+
* v=0: not labeled (in which case x=y=0)
|
| 19 |
+
* v=1: labeled but not visible
|
| 20 |
+
* v=2: labeled and visible
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):
|
| 24 |
+
"""
|
| 25 |
+
Arguments:
|
| 26 |
+
keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.
|
| 27 |
+
The shape should be (N, K, 3) where N is the number of
|
| 28 |
+
instances, and K is the number of keypoints per instance.
|
| 29 |
+
"""
|
| 30 |
+
device = (
|
| 31 |
+
keypoints.device
|
| 32 |
+
if isinstance(keypoints, torch.Tensor)
|
| 33 |
+
else torch.device("cpu")
|
| 34 |
+
)
|
| 35 |
+
keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
|
| 36 |
+
assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape
|
| 37 |
+
self.tensor = keypoints
|
| 38 |
+
|
| 39 |
+
def __len__(self) -> int:
|
| 40 |
+
return self.tensor.size(0)
|
| 41 |
+
|
| 42 |
+
def to(self, *args: Any, **kwargs: Any) -> "Keypoints":
|
| 43 |
+
return type(self)(self.tensor.to(*args, **kwargs))
|
| 44 |
+
|
| 45 |
+
@property
|
| 46 |
+
def device(self) -> torch.device:
|
| 47 |
+
return self.tensor.device
|
| 48 |
+
|
| 49 |
+
def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:
|
| 50 |
+
"""
|
| 51 |
+
Convert keypoint annotations to a heatmap of one-hot labels for training,
|
| 52 |
+
as described in :paper:`Mask R-CNN`.
|
| 53 |
+
|
| 54 |
+
Arguments:
|
| 55 |
+
boxes: Nx4 tensor, the boxes to draw the keypoints to
|
| 56 |
+
|
| 57 |
+
Returns:
|
| 58 |
+
heatmaps:
|
| 59 |
+
A tensor of shape (N, K), each element is integer spatial label
|
| 60 |
+
in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
|
| 61 |
+
valid:
|
| 62 |
+
A tensor of shape (N, K) containing whether each keypoint is in the roi or not.
|
| 63 |
+
"""
|
| 64 |
+
return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)
|
| 65 |
+
|
| 66 |
+
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints":
|
| 67 |
+
"""
|
| 68 |
+
Create a new `Keypoints` by indexing on this `Keypoints`.
|
| 69 |
+
|
| 70 |
+
The following usage are allowed:
|
| 71 |
+
|
| 72 |
+
1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.
|
| 73 |
+
2. `new_kpts = kpts[2:10]`: return a slice of key points.
|
| 74 |
+
3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor
|
| 75 |
+
with `length = len(kpts)`. Nonzero elements in the vector will be selected.
|
| 76 |
+
|
| 77 |
+
Note that the returned Keypoints might share storage with this Keypoints,
|
| 78 |
+
subject to Pytorch's indexing semantics.
|
| 79 |
+
"""
|
| 80 |
+
if isinstance(item, int):
|
| 81 |
+
return Keypoints([self.tensor[item]])
|
| 82 |
+
return Keypoints(self.tensor[item])
|
| 83 |
+
|
| 84 |
+
def __repr__(self) -> str:
|
| 85 |
+
s = self.__class__.__name__ + "("
|
| 86 |
+
s += "num_instances={})".format(len(self.tensor))
|
| 87 |
+
return s
|
| 88 |
+
|
| 89 |
+
@staticmethod
|
| 90 |
+
def cat(keypoints_list: List["Keypoints"]) -> "Keypoints":
|
| 91 |
+
"""
|
| 92 |
+
Concatenates a list of Keypoints into a single Keypoints
|
| 93 |
+
|
| 94 |
+
Arguments:
|
| 95 |
+
keypoints_list (list[Keypoints])
|
| 96 |
+
|
| 97 |
+
Returns:
|
| 98 |
+
Keypoints: the concatenated Keypoints
|
| 99 |
+
"""
|
| 100 |
+
assert isinstance(keypoints_list, (list, tuple))
|
| 101 |
+
assert len(keypoints_list) > 0
|
| 102 |
+
assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list)
|
| 103 |
+
|
| 104 |
+
cat_kpts = type(keypoints_list[0])(
|
| 105 |
+
torch.cat([kpts.tensor for kpts in keypoints_list], dim=0)
|
| 106 |
+
)
|
| 107 |
+
return cat_kpts
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def _keypoints_to_heatmap(
    keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space.

    Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the
    closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the
    continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"):
    d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.

    Arguments:
        keypoints: tensor of keypoint locations in of shape (N, K, 3).
        rois: Nx4 tensor of rois in xyxy format
        heatmap_size: integer side length of square heatmap.

    Returns:
        heatmaps: A tensor of shape (N, K) containing an integer spatial label
            in the range [0, heatmap_size**2 - 1] for each keypoint in the input.
        valid: A tensor of shape (N, K) containing whether each keypoint is in
            the roi or not.
    """

    if rois.numel() == 0:
        return rois.new().long(), rois.new().long()

    # Per-roi offsets and scales, broadcast over the K keypoints of each row.
    x0 = rois[:, 0][:, None]
    y0 = rois[:, 1][:, None]
    sx = (heatmap_size / (rois[:, 2] - rois[:, 0]))[:, None]
    sy = (heatmap_size / (rois[:, 3] - rois[:, 1]))[:, None]

    x = keypoints[..., 0]
    y = keypoints[..., 1]

    # Record (before scaling) which points sit exactly on the right/bottom roi
    # edge; they must map to the last heatmap bin, not fall outside.
    on_right_edge = x == rois[:, 2][:, None]
    on_bottom_edge = y == rois[:, 3][:, None]

    x = ((x - x0) * sx).floor().long()
    y = ((y - y0) * sy).floor().long()
    x[on_right_edge] = heatmap_size - 1
    y[on_bottom_edge] = heatmap_size - 1

    inside = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
    # A target is valid only if it falls inside the roi AND is labeled (v > 0).
    valid = (inside & (keypoints[..., 2] > 0)).long()

    # Flatten (y, x) into a single spatial label; invalid entries become 0.
    heatmaps = (y * heatmap_size + x) * valid

    return heatmaps, valid
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@torch.jit.script_if_tracing
def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor:
    """
    Extract predicted keypoint locations from heatmaps.

    Args:
        maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for
            each ROI and each keypoint.
        rois (Tensor): (#ROIs, 4). The box of each ROI.

    Returns:
        Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to
        (x, y, logit, score) for each keypoint.

    When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate,
    we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from
    Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate.
    """

    offset_x = rois[:, 0]
    offset_y = rois[:, 1]

    # Clamp to >= 1 so the per-roi upsample below always gets a valid size.
    widths = (rois[:, 2] - rois[:, 0]).clamp(min=1)
    heights = (rois[:, 3] - rois[:, 1]).clamp(min=1)
    widths_ceil = widths.ceil()
    heights_ceil = heights.ceil()

    num_rois, num_keypoints = maps.shape[:2]
    xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4)

    # Correction factors for rounding the roi size up to an integer grid.
    width_corrections = widths / widths_ceil
    height_corrections = heights / heights_ceil

    keypoints_idx = torch.arange(num_keypoints, device=maps.device)

    for i in range(num_rois):
        # Upsample this roi's heatmaps to the roi's (integer) pixel size.
        outsize = (int(heights_ceil[i]), int(widths_ceil[i]))
        roi_map = F.interpolate(
            maps[[i]], size=outsize, mode="bicubic", align_corners=False
        )

        # Although semantically equivalent, `reshape` is used instead of `squeeze` due
        # to limitation during ONNX export of `squeeze` in scripting mode
        roi_map = roi_map.reshape(roi_map.shape[1:])  # keypoints x H x W

        # softmax over the spatial region
        # NOTE: max is subtracted for numerical stability; exp_() mutates the
        # freshly created difference tensors in place.
        max_score, _ = roi_map.view(num_keypoints, -1).max(1)
        max_score = max_score.view(num_keypoints, 1, 1)
        tmp_full_resolution = (roi_map - max_score).exp_()
        tmp_pool_resolution = (maps[i] - max_score).exp_()
        # Produce scores over the region H x W, but normalize with POOL_H x POOL_W,
        # so that the scores of objects of different absolute sizes will be more comparable
        roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum(
            (1, 2), keepdim=True
        )

        w = roi_map.shape[2]
        # Flat argmax over the upsampled logits, converted back to (x, y).
        pos = roi_map.view(num_keypoints, -1).argmax(1)

        x_int = pos % w
        y_int = (pos - x_int) // w

        # Sanity check: the argmax of the logits is also the argmax of the scores.
        assert (
            roi_map_scores[keypoints_idx, y_int, x_int]
            == roi_map_scores.view(num_keypoints, -1).max(1)[0]
        ).all()

        # Heckbert conversion (d + 0.5), then undo the integer-size rounding.
        x = (x_int.float() + 0.5) * width_corrections[i]
        y = (y_int.float() + 0.5) * height_corrections[i]

        xy_preds[i, :, 0] = x + offset_x[i]
        xy_preds[i, :, 1] = y + offset_y[i]
        xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int]
        xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int]

    return xy_preds
|
source_code/sam3/sam3/agent/helpers/mask_overlap_removal.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
from typing import Dict, List
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
try:
|
| 9 |
+
from pycocotools import mask as mask_utils
|
| 10 |
+
except Exception:
|
| 11 |
+
mask_utils = None
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def mask_intersection(
    masks1: torch.Tensor, masks2: torch.Tensor, block_size: int = 16
) -> torch.Tensor:
    """
    Pairwise intersection pixel counts between two stacks of boolean masks.

    Args:
        masks1: (N, H, W) bool tensor.
        masks2: (M, H, W) bool tensor with the same spatial size.
        block_size: tile edge for blockwise processing, which bounds the
            peak size of the broadcast intermediate.

    Returns:
        (N, M) long tensor of intersection areas in pixels.
    """
    assert masks1.shape[1:] == masks2.shape[1:]
    assert masks1.dtype == torch.bool and masks2.dtype == torch.bool
    n, m = masks1.shape[0], masks2.shape[0]
    result = torch.zeros(n, m, device=masks1.device, dtype=torch.long)
    for row in range(0, n, block_size):
        rows = masks1[row : row + block_size]
        for col in range(0, m, block_size):
            cols = masks2[col : col + block_size]
            counts = (rows[:, None] & cols[None, :]).flatten(-2).sum(-1)
            result[row : row + block_size, col : col + block_size] = counts
    return result
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def mask_iom(masks1: torch.Tensor, masks2: torch.Tensor) -> torch.Tensor:
    """
    Pairwise intersection-over-minimum-area between two stacks of boolean
    masks: intersection divided by the smaller of the two mask areas.

    Args:
        masks1: (N, H, W) bool tensor.
        masks2: (M, H, W) bool tensor with the same spatial size.

    Returns:
        (N, M) float tensor of IoM values.
    """
    assert masks1.shape[1:] == masks2.shape[1:]
    assert masks1.dtype == torch.bool and masks2.dtype == torch.bool
    overlap = mask_intersection(masks1, masks2)
    areas1 = masks1.flatten(-2).sum(-1)  # (N,)
    areas2 = masks2.flatten(-2).sum(-1)  # (M,)
    # clamp_min(1) guards against division by zero for empty masks.
    smaller = torch.min(areas1[:, None], areas2[None, :]).clamp_min(1)
    return overlap.float() / (smaller.float() + 1e-8)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _decode_single_mask(mask_repr, h: int, w: int) -> np.ndarray:
|
| 41 |
+
if isinstance(mask_repr, (list, tuple, np.ndarray)):
|
| 42 |
+
arr = np.array(mask_repr)
|
| 43 |
+
if arr.ndim != 2:
|
| 44 |
+
raise ValueError("Mask array must be 2D (H, W).")
|
| 45 |
+
return (arr > 0).astype(np.uint8)
|
| 46 |
+
|
| 47 |
+
if mask_utils is None:
|
| 48 |
+
raise ImportError(
|
| 49 |
+
"pycocotools is required to decode RLE mask strings. pip install pycocotools"
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
if not isinstance(mask_repr, (str, bytes)):
|
| 53 |
+
raise ValueError("Unsupported mask representation type for RLE decode.")
|
| 54 |
+
|
| 55 |
+
rle = {
|
| 56 |
+
"counts": mask_repr if isinstance(mask_repr, (str, bytes)) else str(mask_repr),
|
| 57 |
+
"size": [h, w],
|
| 58 |
+
}
|
| 59 |
+
decoded = mask_utils.decode(rle)
|
| 60 |
+
if decoded.ndim == 3:
|
| 61 |
+
decoded = decoded[:, :, 0]
|
| 62 |
+
return (decoded > 0).astype(np.uint8)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _decode_masks_to_torch_bool(pred_masks: List, h: int, w: int) -> torch.Tensor:
    """Decode a list of mask representations into one (N, H, W) bool tensor.

    Each entry is decoded with :func:`_decode_single_mask` (array-like or RLE),
    stacked, and binarized.
    """
    decoded = [_decode_single_mask(entry, h, w) for entry in pred_masks]
    stacked = np.stack(decoded, axis=0).astype(np.uint8)  # (N, H, W)
    return torch.from_numpy(stacked > 0)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def remove_overlapping_masks(sample: Dict, iom_thresh: float = 0.3) -> Dict:
    """
    Greedy keep: sort by score desc; keep a mask if IoM to all kept masks <= threshold.
    If pred_masks has length 0 or 1, returns sample unchanged (no extra keys).

    Args:
        sample: prediction dict with at least "pred_masks", "orig_img_h",
            "orig_img_w"; optionally "pred_scores" and "pred_boxes"
            (parallel lists of the same length as "pred_masks").
        iom_thresh: maximum intersection-over-minimum allowed against any
            higher-scored kept mask before a mask is dropped.

    Returns:
        Dict: a new dict with filtered "pred_masks"/"pred_scores"/"pred_boxes"
        plus bookkeeping keys "kept_indices", "removed_indices" and
        "iom_threshold"; or the input dict unchanged when there is nothing
        to filter.
    """
    # Basic presence checks
    if "pred_masks" not in sample or not isinstance(sample["pred_masks"], list):
        return sample  # nothing to do / preserve as-is

    pred_masks = sample["pred_masks"]
    N = len(pred_masks)

    # --- Early exit: 0 or 1 mask -> do NOT modify the JSON at all ---
    if N <= 1:
        return sample

    # From here on we have at least 2 masks
    h = int(sample["orig_img_h"])
    w = int(sample["orig_img_w"])
    pred_scores = sample.get("pred_scores", [1.0] * N)  # fallback if scores missing
    pred_boxes = sample.get("pred_boxes", None)

    assert N == len(pred_scores), "pred_masks and pred_scores must have same length"
    if pred_boxes is not None:
        assert N == len(pred_boxes), "pred_masks and pred_boxes must have same length"

    masks_bool = _decode_masks_to_torch_bool(pred_masks, h, w)  # (N, H, W)

    # Visit masks from highest to lowest score so a mask is only ever
    # suppressed by a better-scored one.
    order = sorted(range(N), key=lambda i: float(pred_scores[i]), reverse=True)
    kept_idx: List[int] = []
    kept_masks: List[torch.Tensor] = []

    for i in order:
        if not kept_masks:
            # First (best-scored) mask is always kept.
            kept_idx.append(i)
            kept_masks.append(masks_bool[i])
            continue

        cand = masks_bool[i].unsqueeze(0)  # (1, H, W)
        kept_stack = torch.stack(kept_masks, dim=0)  # (K, H, W)
        iom_vals = mask_iom(cand, kept_stack).squeeze(0)  # (K,)
        if torch.any(iom_vals > iom_thresh):
            continue  # overlaps too much with a higher-scored kept mask
        kept_idx.append(i)
        kept_masks.append(masks_bool[i])

    kept_idx_sorted = sorted(kept_idx)
    # Build the membership set once; the original rebuilt it for every index
    # inside the "removed_indices" comprehension (O(N*K)).
    kept_set = set(kept_idx_sorted)

    # Build filtered JSON (this *does* modify fields; only for N>=2 case)
    out = dict(sample)
    out["pred_masks"] = [pred_masks[i] for i in kept_idx_sorted]
    out["pred_scores"] = [pred_scores[i] for i in kept_idx_sorted]
    if pred_boxes is not None:
        out["pred_boxes"] = [pred_boxes[i] for i in kept_idx_sorted]
    out["kept_indices"] = kept_idx_sorted
    out["removed_indices"] = [i for i in range(N) if i not in kept_set]
    out["iom_threshold"] = float(iom_thresh)
    return out
|
source_code/sam3/sam3/agent/helpers/masks.py
ADDED
|
@@ -0,0 +1,560 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
import copy
|
| 4 |
+
import itertools
|
| 5 |
+
from typing import Any, Iterator, List, Union
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pycocotools.mask as mask_util
|
| 9 |
+
import torch
|
| 10 |
+
from torch import device
|
| 11 |
+
|
| 12 |
+
from .boxes import Boxes
|
| 13 |
+
from .memory import retry_if_cuda_oom
|
| 14 |
+
|
| 15 |
+
from .roi_align import ROIAlign
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def polygon_area(x, y):
    """Area of a simple polygon given its vertex coordinates.

    Uses the shoelace formula:
    https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
    """
    signed_twice = np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))
    return 0.5 * np.abs(signed_twice)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def polygons_to_bitmask(
    polygons: List[np.ndarray], height: int, width: int
) -> np.ndarray:
    """
    Args:
        polygons (list[ndarray]): each array has shape (Nx2,)
        height, width (int)

    Returns:
        ndarray: a bool mask of shape (height, width)
    """
    if not polygons:
        # COCOAPI does not support empty polygons
        return np.zeros((height, width)).astype(bool)
    encoded = mask_util.frPyObjects(polygons, height, width)
    merged = mask_util.merge(encoded)
    return mask_util.decode(merged).astype(bool)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def rasterize_polygons_within_box(
    polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
    """
    Rasterize the polygons into a mask image and
    crop the mask content in the given box.
    The cropped mask is resized to (mask_size, mask_size).

    This function is used when generating training targets for mask head in Mask R-CNN.
    Given original ground-truth masks for an image, new ground-truth mask
    training targets in the size of `mask_size x mask_size`
    must be provided for each predicted box. This function will be called to
    produce such targets.

    Args:
        polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
        box: 4-element numpy array
        mask_size (int):

    Returns:
        Tensor: BoolTensor of shape (mask_size, mask_size)
    """
    # 1. Shift the polygons w.r.t the boxes
    box_w = box[2] - box[0]
    box_h = box[3] - box[1]

    # Work on a deep copy: the coordinate arrays are mutated in place below.
    shifted = copy.deepcopy(polygons)
    for poly in shifted:
        poly[0::2] = poly[0::2] - box[0]
        poly[1::2] = poly[1::2] - box[1]

    # 2. Rescale the polygons to the new box size
    # max() to avoid division by small number
    scale_y = mask_size / max(box_h, 0.1)
    scale_x = mask_size / max(box_w, 0.1)

    if scale_x == scale_y:
        # Uniform scaling: one in-place multiply per polygon.
        for poly in shifted:
            poly *= scale_y
    else:
        for poly in shifted:
            poly[0::2] *= scale_x
            poly[1::2] *= scale_y

    # 3. Rasterize the polygons with coco api
    return torch.from_numpy(polygons_to_bitmask(shifted, mask_size, mask_size))
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class BitMasks:
    """
    This class stores the segmentation masks for all objects in one image, in
    the form of bitmaps.

    Attributes:
        tensor: bool Tensor of N,H,W, representing N instances in the image.
    """

    def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
        """
        Args:
            tensor: bool Tensor of N,H,W, representing N instances in the image.
        """
        if isinstance(tensor, torch.Tensor):
            # Keep the input's device; only coerce the dtype to bool.
            tensor = tensor.to(torch.bool)
        else:
            # Non-tensor inputs (e.g. numpy arrays) are placed on CPU.
            tensor = torch.as_tensor(
                tensor, dtype=torch.bool, device=torch.device("cpu")
            )
        assert tensor.dim() == 3, tensor.size()
        # (H, W) spatial size shared by all instance masks.
        self.image_size = tensor.shape[1:]
        self.tensor = tensor

    @torch.jit.unused
    def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
        """Return a new BitMasks with the tensor moved/cast via ``Tensor.to``."""
        return BitMasks(self.tensor.to(*args, **kwargs))

    @property
    def device(self) -> torch.device:
        """Device on which the mask tensor lives."""
        return self.tensor.device

    @torch.jit.unused
    def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
        """
        Returns:
            BitMasks: Create a new :class:`BitMasks` by indexing.

        The following usage are allowed:

        1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
        2. `new_masks = masks[2:10]`: return a slice of masks.
        3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
           with `length = len(masks)`. Nonzero elements in the vector will be selected.

        Note that the returned object might share storage with this object,
        subject to Pytorch's indexing semantics.
        """
        if isinstance(item, int):
            # Re-add the instance dimension dropped by integer indexing.
            return BitMasks(self.tensor[item].unsqueeze(0))
        m = self.tensor[item]
        assert (
            m.dim() == 3
        ), "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
            item, m.shape
        )
        return BitMasks(m)

    @torch.jit.unused
    def __iter__(self) -> torch.Tensor:
        # Yields one (H, W) bool tensor per instance.
        yield from self.tensor

    @torch.jit.unused
    def __repr__(self) -> str:
        s = self.__class__.__name__ + "("
        s += "num_instances={})".format(len(self.tensor))
        return s

    def __len__(self) -> int:
        """Number of instance masks stored."""
        return self.tensor.shape[0]

    def nonempty(self) -> torch.Tensor:
        """
        Find masks that are non-empty.

        Returns:
            Tensor: a BoolTensor which represents
                whether each mask is empty (False) or non-empty (True).
        """
        return self.tensor.flatten(1).any(dim=1)

    @staticmethod
    def from_polygon_masks(
        polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]],
        height: int,
        width: int,
    ) -> "BitMasks":
        """
        Args:
            polygon_masks (list[list[ndarray]] or PolygonMasks)
            height, width (int)
        """
        if isinstance(polygon_masks, PolygonMasks):
            polygon_masks = polygon_masks.polygons
        masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
        if len(masks):
            return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
        else:
            # No instances: keep the declared spatial size with N == 0.
            return BitMasks(torch.empty(0, height, width, dtype=torch.bool))

    @staticmethod
    def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
        """
        Args:
            roi_masks:
            height, width (int):
        """
        # NOTE(review): ROIMasks.to_bitmasks in this file is declared as
        # (boxes, height, width, threshold=0.5); this call passes only
        # (height, width), so `height` would bind to `boxes`.  Confirm
        # whether this path is ever exercised.
        return roi_masks.to_bitmasks(height, width)

    def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
        """
        Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
        This can be used to prepare training targets for Mask R-CNN.
        It has less reconstruction error compared to rasterization with polygons.
        However we observe no difference in accuracy,
        but BitMasks requires more memory to store all the masks.

        Args:
            boxes (Tensor): Nx4 tensor storing the boxes for each mask
            mask_size (int): the size of the rasterized mask.

        Returns:
            Tensor:
                A bool tensor of shape (N, mask_size, mask_size), where
                N is the number of predicted boxes for this image.
        """
        assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
        # Local name shadows the `device` type imported at module top; it is
        # only used within this method.
        device = self.tensor.device

        # Prefix each box with its batch index to build ROIAlign's Nx5 rois.
        batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[
            :, None
        ]
        rois = torch.cat([batch_inds, boxes], dim=1)  # Nx5

        # ROIAlign operates on float inputs; binarize again afterwards.
        bit_masks = self.tensor.to(dtype=torch.float32)
        rois = rois.to(device=device)
        output = (
            ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
            .forward(bit_masks[:, None, :, :], rois)
            .squeeze(1)
        )
        output = output >= 0.5
        return output

    def get_bounding_boxes(self) -> Boxes:
        """
        Returns:
            Boxes: tight bounding boxes around bitmasks.
            If a mask is empty, it's bounding box will be all zero.
        """
        boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
        # Column/row occupancy projections used to find extents per mask.
        x_any = torch.any(self.tensor, dim=1)
        y_any = torch.any(self.tensor, dim=2)
        for idx in range(self.tensor.shape[0]):
            x = torch.where(x_any[idx, :])[0]
            y = torch.where(y_any[idx, :])[0]
            if len(x) > 0 and len(y) > 0:
                # +1 makes the box exclusive of the last occupied pixel.
                boxes[idx, :] = torch.as_tensor(
                    [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
                )
        return Boxes(boxes)

    @staticmethod
    def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
        """
        Concatenates a list of BitMasks into a single BitMasks

        Arguments:
            bitmasks_list (list[BitMasks])

        Returns:
            BitMasks: the concatenated BitMasks
        """
        assert isinstance(bitmasks_list, (list, tuple))
        assert len(bitmasks_list) > 0
        assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)

        # type(...) keeps subclass identity if BitMasks is subclassed.
        cat_bitmasks = type(bitmasks_list[0])(
            torch.cat([bm.tensor for bm in bitmasks_list], dim=0)
        )
        return cat_bitmasks
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class PolygonMasks:
    """
    This class stores the segmentation masks for all objects in one image, in the form of polygons.

    Attributes:
        polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
    """

    def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
        """
        Arguments:
            polygons (list[list[np.ndarray]]): The first
                level of the list correspond to individual instances,
                the second level to all the polygons that compose the
                instance, and the third level to the polygon coordinates.
                The third level array should have the format of
                [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
        """
        if not isinstance(polygons, list):
            raise ValueError(
                "Cannot create PolygonMasks: Expect a list of list of polygons per image. "
                "Got '{}' instead.".format(type(polygons))
            )

        def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
            # Use float64 for higher precision, because why not?
            # Always put polygons on CPU (self.to is a no-op) since they
            # are supposed to be small tensors.
            # May need to change this assumption if GPU placement becomes useful
            if isinstance(t, torch.Tensor):
                t = t.cpu().numpy()
            return np.asarray(t).astype("float64")

        def process_polygons(
            polygons_per_instance: List[Union[torch.Tensor, np.ndarray]],
        ) -> List[np.ndarray]:
            # Validate and normalize the polygons of a single instance.
            if not isinstance(polygons_per_instance, list):
                raise ValueError(
                    "Cannot create polygons: Expect a list of polygons per instance. "
                    "Got '{}' instead.".format(type(polygons_per_instance))
                )
            # transform each polygon to a numpy array
            polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
            for polygon in polygons_per_instance:
                # A valid polygon needs at least 3 (x, y) pairs -> 6 coords.
                if len(polygon) % 2 != 0 or len(polygon) < 6:
                    raise ValueError(
                        f"Cannot create a polygon from {len(polygon)} coordinates."
                    )
            return polygons_per_instance

        self.polygons: List[List[np.ndarray]] = [
            process_polygons(polygons_per_instance)
            for polygons_per_instance in polygons
        ]

    def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
        """No-op: polygons always stay on CPU (see ``_make_array`` above)."""
        return self

    @property
    def device(self) -> torch.device:
        """Polygons are CPU-only by construction."""
        return torch.device("cpu")

    def get_bounding_boxes(self) -> Boxes:
        """
        Returns:
            Boxes: tight bounding boxes around polygon masks.
        """
        boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
        for idx, polygons_per_instance in enumerate(self.polygons):
            # Running min/max over all polygons of this instance.
            minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
            maxxy = torch.zeros(2, dtype=torch.float32)
            for polygon in polygons_per_instance:
                coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
                minxy = torch.min(minxy, torch.min(coords, dim=0).values)
                maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
            boxes[idx, :2] = minxy
            boxes[idx, 2:] = maxxy
        return Boxes(boxes)

    def nonempty(self) -> torch.Tensor:
        """
        Find masks that are non-empty.

        Returns:
            Tensor:
                a BoolTensor which represents whether each mask is empty (False) or not (True).
        """
        # Each element of self.polygons is the polygon list of ONE instance;
        # an instance is non-empty iff it has at least one polygon.
        keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
        return torch.from_numpy(np.asarray(keep, dtype=bool))

    def __getitem__(
        self, item: Union[int, slice, List[int], torch.BoolTensor]
    ) -> "PolygonMasks":
        """
        Support indexing over the instances and return a `PolygonMasks` object.
        `item` can be:

        1. An integer. It will return an object with only one instance.
        2. A slice. It will return an object with the selected instances.
        3. A list[int]. It will return an object with the selected instances,
           correpsonding to the indices in the list.
        4. A vector mask of type BoolTensor, whose length is num_instances.
           It will return an object with the instances whose mask is nonzero.
        """
        # NOTE(review): if `item` matches none of the branches below,
        # `selected_polygons` is never bound and the final return raises
        # NameError instead of a descriptive TypeError.
        if isinstance(item, int):
            selected_polygons = [self.polygons[item]]
        elif isinstance(item, slice):
            selected_polygons = self.polygons[item]
        elif isinstance(item, list):
            selected_polygons = [self.polygons[i] for i in item]
        elif isinstance(item, torch.Tensor):
            # Polygons is a list, so we have to move the indices back to CPU.
            if item.dtype == torch.bool:
                assert item.dim() == 1, item.shape
                item = item.nonzero().squeeze(1).cpu().numpy().tolist()
            elif item.dtype in [torch.int32, torch.int64]:
                item = item.cpu().numpy().tolist()
            else:
                raise ValueError(
                    "Unsupported tensor dtype={} for indexing!".format(item.dtype)
                )
            selected_polygons = [self.polygons[i] for i in item]
        return PolygonMasks(selected_polygons)

    def __iter__(self) -> Iterator[List[np.ndarray]]:
        """
        Yields:
            list[ndarray]: the polygons for one instance.
            Each Tensor is a float64 vector representing a polygon.
        """
        return iter(self.polygons)

    def __repr__(self) -> str:
        s = self.__class__.__name__ + "("
        s += "num_instances={})".format(len(self.polygons))
        return s

    def __len__(self) -> int:
        """Number of instances stored."""
        return len(self.polygons)

    def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
        """
        Crop each mask by the given box, and resize results to (mask_size, mask_size).
        This can be used to prepare training targets for Mask R-CNN.

        Args:
            boxes (Tensor): Nx4 tensor storing the boxes for each mask
            mask_size (int): the size of the rasterized mask.

        Returns:
            Tensor: A bool tensor of shape (N, mask_size, mask_size), where
            N is the number of predicted boxes for this image.
        """
        assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))

        device = boxes.device
        # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
        # (several small tensors for representing a single instance mask)
        boxes = boxes.to(torch.device("cpu"))

        results = [
            rasterize_polygons_within_box(poly, box.numpy(), mask_size)
            for poly, box in zip(self.polygons, boxes)
        ]
        """
        poly: list[list[float]], the polygons for one instance
        box: a tensor of shape (4,)
        """
        if len(results) == 0:
            return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
        # Stack on CPU, then restore the caller's original device.
        return torch.stack(results, dim=0).to(device=device)

    def area(self):
        """
        Computes area of the mask.
        Only works with Polygons, using the shoelace formula:
        https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates

        Returns:
            Tensor: a vector, area for each instance
        """

        area = []
        for polygons_per_instance in self.polygons:
            area_per_instance = 0
            for p in polygons_per_instance:
                # p is flat [x0, y0, x1, y1, ...]; split into x and y strides.
                area_per_instance += polygon_area(p[0::2], p[1::2])
            area.append(area_per_instance)

        return torch.tensor(area)

    @staticmethod
    def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
        """
        Concatenates a list of PolygonMasks into a single PolygonMasks

        Arguments:
            polymasks_list (list[PolygonMasks])

        Returns:
            PolygonMasks: the concatenated PolygonMasks
        """
        assert isinstance(polymasks_list, (list, tuple))
        assert len(polymasks_list) > 0
        assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)

        # type(...) keeps subclass identity if PolygonMasks is subclassed.
        cat_polymasks = type(polymasks_list[0])(
            list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
        )
        return cat_polymasks
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
class ROIMasks:
    """
    Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
    full-image bitmask can be obtained by "pasting" the mask on the region defined
    by the corresponding ROI box.
    """

    def __init__(self, tensor: torch.Tensor):
        """
        Args:
            tensor: (N, M, M) mask tensor that defines the mask within each ROI.
        """
        if tensor.dim() != 3:
            raise ValueError("ROIMasks must take a masks of 3 dimension.")
        self.tensor = tensor

    def to(self, device: torch.device) -> "ROIMasks":
        """Return a new ROIMasks with the tensor moved to ``device``."""
        return ROIMasks(self.tensor.to(device))

    @property
    def device(self) -> device:
        # Return annotation is the bare `device` imported from torch at
        # module top (equivalent to torch.device).
        return self.tensor.device

    def __len__(self):
        """Number of ROI masks stored."""
        return self.tensor.shape[0]

    def __getitem__(self, item) -> "ROIMasks":
        """
        Returns:
            ROIMasks: Create a new :class:`ROIMasks` by indexing.

        The following usage are allowed:

        1. `new_masks = masks[2:10]`: return a slice of masks.
        2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
           with `length = len(masks)`. Nonzero elements in the vector will be selected.

        Note that the returned object might share storage with this object,
        subject to Pytorch's indexing semantics.
        """
        t = self.tensor[item]
        # Integer indexing would drop a dimension; only 3D results are valid.
        if t.dim() != 3:
            raise ValueError(
                f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
            )
        return ROIMasks(t)

    @torch.jit.unused
    def __repr__(self) -> str:
        s = self.__class__.__name__ + "("
        s += "num_instances={})".format(len(self.tensor))
        return s

    @torch.jit.unused
    def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
        """
        Args: see documentation of :func:`paste_masks_in_image`.
        """
        # Imported inside the method — presumably to avoid a hard detectron2
        # dependency at module import time; confirm before moving to the top.
        from detectron2.layers.mask_ops import (
            _paste_masks_tensor_shape,
            paste_masks_in_image,
        )

        if torch.jit.is_tracing():
            # Under tracing, a tensor-valued height selects the shape-aware
            # variant so the traced graph stays shape-generic.
            if isinstance(height, torch.Tensor):
                paste_func = _paste_masks_tensor_shape
            else:
                paste_func = paste_masks_in_image
        else:
            # Outside tracing, wrap with the CUDA-OOM retry helper.
            paste_func = retry_if_cuda_oom(paste_masks_in_image)
        bitmasks = paste_func(
            self.tensor, boxes.tensor, (height, width), threshold=threshold
        )
        return BitMasks(bitmasks)
|
source_code/sam3/sam3/agent/helpers/rle.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
"""Some utilities for RLE encoding that doesn't require downloading the masks to the cpu"""
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
from pycocotools import mask as mask_util
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@torch.no_grad()
def rle_encode(orig_mask: torch.Tensor, return_areas: bool = False):
    """Encodes a collection of masks in COCO compressed RLE format.

    This function emulates the behavior of the COCO API's encode function, but
    is executed partially on the GPU for faster execution.

    Args:
        orig_mask (torch.Tensor): A mask of shape (N, H, W) with dtype=torch.bool
        return_areas (bool): If True, add the areas of the masks as a part of
            the RLE output dict under the "area" key. Default is False.

    Returns:
        list[dict]: One COCO RLE dict per mask, each with a utf-8 "counts"
        string and a "size" [H, W] entry (plus "area" when return_areas is
        True). An empty list is returned for an empty input.
    """
    assert orig_mask.ndim == 3, "Mask must be of shape (N, H, W)"
    assert orig_mask.dtype == torch.bool, "Mask must have dtype=torch.bool"

    if orig_mask.numel() == 0:
        return []

    # First, transpose the spatial dimensions.
    # This is necessary because the COCO API uses Fortran (column-major) order.
    mask = orig_mask.transpose(1, 2)

    # Flatten each mask into a 1-D pixel sequence: shape (N, W*H).
    flat_mask = mask.reshape(mask.shape[0], -1)
    if return_areas:
        mask_areas = flat_mask.sum(-1).tolist()
    # Find the indices where the mask changes value. `differences` has one
    # extra column: column 0 records a change when the first pixel is
    # foreground (RLE counts start with a background run, possibly of length
    # 0), and the final column stays True as an end-of-mask sentinel so every
    # mask contributes at least one boundary.
    differences = torch.ones(
        mask.shape[0], flat_mask.shape[1] + 1, device=mask.device, dtype=torch.bool
    )
    differences[:, 1:-1] = flat_mask[:, :-1] != flat_mask[:, 1:]
    differences[:, 0] = flat_mask[:, 0]
    # Column positions of all change points, concatenated in batch order
    # (row indices from torch.where are discarded).
    _, change_indices = torch.where(differences)

    try:
        # Per-mask change-point counts, cumulated so that
        # change_indices[boundaries[i-1]:boundaries[i]] belongs to mask i.
        boundaries = torch.cumsum(differences.sum(-1), 0).cpu()
    except RuntimeError as _:
        # Fallback: if the device reduction fails (e.g. OOM), reduce on CPU.
        boundaries = torch.cumsum(differences.cpu().sum(-1), 0)

    change_indices_clone = change_indices.clone()
    # First pass computes the RLEs on GPU, in a flattened format: turn the
    # absolute change positions into run lengths by adjacent differencing.
    for i in range(mask.shape[0]):
        # Get the change indices for this batch item
        beg = 0 if i == 0 else boundaries[i - 1].item()
        end = boundaries[i].item()
        change_indices[beg + 1 : end] -= change_indices_clone[beg : end - 1]

    # Now we can split the RLEs of each batch item and convert them to strings.
    # No more GPU work from this point on.
    change_indices = change_indices.tolist()

    batch_rles = []
    # Process each mask in the batch separately
    for i in range(mask.shape[0]):
        beg = 0 if i == 0 else boundaries[i - 1].item()
        end = boundaries[i].item()
        run_lengths = change_indices[beg:end]

        # Build an uncompressed RLE dict and let pycocotools compress it.
        uncompressed_rle = {"counts": run_lengths, "size": list(orig_mask.shape[1:])}
        h, w = uncompressed_rle["size"]
        rle = mask_util.frPyObjects(uncompressed_rle, h, w)
        # pycocotools yields bytes; decode so the result is JSON-serializable.
        rle["counts"] = rle["counts"].decode("utf-8")
        if return_areas:
            rle["area"] = mask_areas[i]
        batch_rles.append(rle)

    return batch_rles
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def robust_rle_encode(masks):
    """Encode a batch of masks as COCO RLE dicts.

    Tries the (partially) GPU-accelerated :func:`rle_encode` first; if that
    raises a RuntimeError, falls back to encoding each mask on the CPU with
    pycocotools.
    """
    assert masks.ndim == 3, "Mask must be of shape (N, H, W)"
    assert masks.dtype == torch.bool, "Mask must have dtype=torch.bool"

    try:
        return rle_encode(masks)
    except RuntimeError:
        pass

    # CPU fallback: pycocotools expects Fortran-ordered uint8 (H, W, 1) arrays.
    np_masks = masks.cpu().numpy()
    encoded = []
    for single_mask in np_masks:
        rle = mask_util.encode(
            np.array(single_mask[:, :, np.newaxis], dtype=np.uint8, order="F")
        )[0]
        # Decode the bytes "counts" for JSON-serializable output.
        rle["counts"] = rle["counts"].decode("utf-8")
        encoded.append(rle)
    return encoded
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def ann_to_rle(segm, im_info):
    """Convert a segmentation to compressed RLE.

    The input may be a polygon list, an uncompressed RLE dict, or an
    already-compressed RLE dict.

    Args:
        segm (list | dict): the segmentation to convert.
        im_info (dict): image metadata providing "height" and "width".

    Returns:
        dict: the compressed RLE.
    """
    h = im_info["height"]
    w = im_info["width"]
    if isinstance(segm, list):
        # Polygon: a single object may consist of multiple parts;
        # merge all part RLEs into one mask.
        part_rles = mask_util.frPyObjects(segm, h, w)
        return mask_util.merge(part_rles)
    if isinstance(segm["counts"], list):
        # Uncompressed RLE -> compressed RLE.
        return mask_util.frPyObjects(segm, h, w)
    # Already compressed RLE; pass through unchanged.
    return segm
|
source_code/sam3/sam3/agent/helpers/roi_align.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
|
| 2 |
+
|
| 3 |
+
from torch import nn
|
| 4 |
+
from torchvision.ops import roi_align
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# NOTE: torchvision's RoIAlign has a different default aligned=False
|
| 8 |
+
class ROIAlign(nn.Module):
    """RoIAlign with detectron2's pixel-model-correct default (aligned=True)."""

    def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
        """
        Args:
            output_size (tuple): h, w
            spatial_scale (float): scale the input boxes by this number
            sampling_ratio (int): number of inputs samples to take for each output
                sample. 0 to take samples densely.
            aligned (bool): if False, use the legacy implementation in
                Detectron. If True, align the results more perfectly.

        Note:
            With aligned=True, a continuous coordinate c maps to pixel
            neighbors floor(c - 0.5) and ceil(c - 0.5): the ROI is scaled and
            then shifted by -0.5 before calling roi_align, producing correct
            bilinear-interpolation neighbors under the half-pixel-center
            model. The legacy aligned=False path omits the -0.5 shift and is
            therefore slightly misaligned. The difference does not affect
            model performance when ROIAlign feeds into conv layers; see
            detectron2/tests/test_roi_align.py for verification.
        """
        super().__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio
        self.aligned = aligned

        from torchvision import __version__

        major_minor = tuple(int(piece) for piece in __version__.split(".")[:2])
        # aligned=True requires https://github.com/pytorch/vision/pull/2438
        assert major_minor >= (0, 7), "Require torchvision >= 0.7"

    def forward(self, input, rois):
        """
        Args:
            input: NCHW images
            rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.
        """
        assert rois.dim() == 2 and rois.size(1) == 5
        if input.is_quantized:
            # roi_align operates on float tensors only.
            input = input.dequantize()
        return roi_align(
            input,
            rois.to(dtype=input.dtype),
            self.output_size,
            self.spatial_scale,
            self.sampling_ratio,
            self.aligned,
        )

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f"output_size={self.output_size}"
            f", spatial_scale={self.spatial_scale}"
            f", sampling_ratio={self.sampling_ratio}"
            f", aligned={self.aligned})"
        )
|