mally-2000 committed on
Commit
e8c53ea
·
verified ·
1 Parent(s): 2ce449d

Add Overthrust evaluation progress logs

Browse files

Print method, device, dataset, batch progress, metric computation, and output paths during evaluation so long Colab runs show progress.

Files changed (1) hide show
  1. codes/eval_overthrust.py +24 -1
codes/eval_overthrust.py CHANGED
@@ -104,8 +104,12 @@ def evaluate_overthrust(
104
  output_dir = Path(output_dir)
105
  output_dir.mkdir(parents=True, exist_ok=True)
106
  device = torch.device(device or ("cuda" if torch.cuda.is_available() else "cpu"))
 
 
 
107
  pipe = pipe.to(device)
108
 
 
109
  dataset = OverthrustTrueimpDataset(
110
  size=OVERTHRUST_CONFIG["size"],
111
  normalize=OVERTHRUST_CONFIG["normalize"],
@@ -125,6 +129,11 @@ def evaluate_overthrust(
125
  fixed_noise_snr=OVERTHRUST_CONFIG["noise_snr"],
126
  fixed_f0_phase=OVERTHRUST_CONFIG["f0_phase"],
127
  )
 
 
 
 
 
128
  loader = DataLoader(
129
  dataset,
130
  batch_size=OVERTHRUST_CONFIG["batch_size"],
@@ -135,8 +144,14 @@ def evaluate_overthrust(
135
  all_predictions: list[np.ndarray] = []
136
  all_targets: list[np.ndarray] = []
137
  all_reconstructions: list[np.ndarray] = []
138
- for batch in loader:
 
139
  seeds = batch["seed"].tolist()
 
 
 
 
 
140
  dipin = batch["dipin"].to(device)
141
  record = batch["record"].to(device)
142
  image = batch["image"].to(device)
@@ -151,6 +166,8 @@ def evaluate_overthrust(
151
  device=device,
152
  ),
153
  }
 
 
154
  output = pipe(
155
  dipin=dipin,
156
  record=record,
@@ -159,6 +176,7 @@ def evaluate_overthrust(
159
  seeds=seeds,
160
  **extra_kwargs,
161
  )
 
162
  prediction = output.impedance_samples
163
  reconstruction = output.impedance_reconstructed
164
  for local_idx in range(prediction.shape[0]):
@@ -166,6 +184,7 @@ def evaluate_overthrust(
166
  all_targets.append(image[local_idx, 0].detach().cpu().numpy())
167
  all_reconstructions.append(reconstruction[local_idx, 0].detach().cpu().numpy())
168
 
 
169
  full_target = stitch_patches(
170
  all_targets, dataset.splits, dataset.big_img.shape, OVERTHRUST_CONFIG["size"]
171
  )
@@ -176,10 +195,12 @@ def evaluate_overthrust(
176
  all_reconstructions, dataset.splits, dataset.big_img.shape, OVERTHRUST_CONFIG["size"]
177
  )
178
 
 
179
  full_target_impedance = dataset.fan(full_target)
180
  full_prediction_impedance = dataset.fan(full_prediction)
181
  full_reconstruction_impedance = dataset.fan(full_reconstruction)
182
 
 
183
  metrics_summary = {
184
  "config": {
185
  **OVERTHRUST_CONFIG,
@@ -200,11 +221,13 @@ def evaluate_overthrust(
200
  "comparison": output_dir / "comparison_impedance.png",
201
  "metrics": output_dir / "metrics_summary.json",
202
  }
 
203
  np.save(paths["full_target"], full_target)
204
  np.save(paths["full_prediction"], full_prediction)
205
  np.save(paths["full_reconstruction"], full_reconstruction)
206
  save_comparison(full_target_impedance, full_prediction_impedance, paths["comparison"])
207
  paths["metrics"].write_text(json.dumps(metrics_summary, indent=2), encoding="utf-8")
 
208
  return {
209
  "metrics": metrics_summary,
210
  "paths": {key: str(value) for key, value in paths.items()},
 
104
  output_dir = Path(output_dir)
105
  output_dir.mkdir(parents=True, exist_ok=True)
106
  device = torch.device(device or ("cuda" if torch.cuda.is_available() else "cpu"))
107
+ print(f"[eval] method={method}, steps={num_inference_steps}, device={device}")
108
+ print(f"[eval] output_dir={output_dir}")
109
+ print("[eval] moving pipeline to device...")
110
  pipe = pipe.to(device)
111
 
112
+ print("[eval] building Overthrust dataset...")
113
  dataset = OverthrustTrueimpDataset(
114
  size=OVERTHRUST_CONFIG["size"],
115
  normalize=OVERTHRUST_CONFIG["normalize"],
 
129
  fixed_noise_snr=OVERTHRUST_CONFIG["noise_snr"],
130
  fixed_f0_phase=OVERTHRUST_CONFIG["f0_phase"],
131
  )
132
+ print(
133
+ "[eval] dataset ready: "
134
+ f"patches={len(dataset)}, batch_size={OVERTHRUST_CONFIG['batch_size']}, "
135
+ f"patch_indices={OVERTHRUST_CONFIG['patch_indices']}"
136
+ )
137
  loader = DataLoader(
138
  dataset,
139
  batch_size=OVERTHRUST_CONFIG["batch_size"],
 
144
  all_predictions: list[np.ndarray] = []
145
  all_targets: list[np.ndarray] = []
146
  all_reconstructions: list[np.ndarray] = []
147
+ total_batches = len(loader)
148
+ for batch_idx, batch in enumerate(loader, start=1):
149
  seeds = batch["seed"].tolist()
150
+ batch_size = len(seeds)
151
+ print(
152
+ f"[eval] batch {batch_idx}/{total_batches}: "
153
+ f"batch_size={batch_size}, seeds={seeds}"
154
+ )
155
  dipin = batch["dipin"].to(device)
156
  record = batch["record"].to(device)
157
  image = batch["image"].to(device)
 
166
  device=device,
167
  ),
168
  }
169
+ print(f"[eval] batch {batch_idx}/{total_batches}: CLDM operator ready")
170
+ print(f"[eval] batch {batch_idx}/{total_batches}: running pipeline...")
171
  output = pipe(
172
  dipin=dipin,
173
  record=record,
 
176
  seeds=seeds,
177
  **extra_kwargs,
178
  )
179
+ print(f"[eval] batch {batch_idx}/{total_batches}: collecting predictions...")
180
  prediction = output.impedance_samples
181
  reconstruction = output.impedance_reconstructed
182
  for local_idx in range(prediction.shape[0]):
 
184
  all_targets.append(image[local_idx, 0].detach().cpu().numpy())
185
  all_reconstructions.append(reconstruction[local_idx, 0].detach().cpu().numpy())
186
 
187
+ print("[eval] stitching patches...")
188
  full_target = stitch_patches(
189
  all_targets, dataset.splits, dataset.big_img.shape, OVERTHRUST_CONFIG["size"]
190
  )
 
195
  all_reconstructions, dataset.splits, dataset.big_img.shape, OVERTHRUST_CONFIG["size"]
196
  )
197
 
198
+ print("[eval] converting normalized predictions to impedance...")
199
  full_target_impedance = dataset.fan(full_target)
200
  full_prediction_impedance = dataset.fan(full_prediction)
201
  full_reconstruction_impedance = dataset.fan(full_reconstruction)
202
 
203
+ print("[eval] computing metrics...")
204
  metrics_summary = {
205
  "config": {
206
  **OVERTHRUST_CONFIG,
 
221
  "comparison": output_dir / "comparison_impedance.png",
222
  "metrics": output_dir / "metrics_summary.json",
223
  }
224
+ print("[eval] saving outputs...")
225
  np.save(paths["full_target"], full_target)
226
  np.save(paths["full_prediction"], full_prediction)
227
  np.save(paths["full_reconstruction"], full_reconstruction)
228
  save_comparison(full_target_impedance, full_prediction_impedance, paths["comparison"])
229
  paths["metrics"].write_text(json.dumps(metrics_summary, indent=2), encoding="utf-8")
230
+ print(f"[eval] done. metrics={paths['metrics']}")
231
  return {
232
  "metrics": metrics_summary,
233
  "paths": {key: str(value) for key, value in paths.items()},