dreamlessx committed on
Commit
893c358
·
verified ·
1 Parent(s): 6071b18

Update landmarkdiff/benchmark.py to v0.3.2

Browse files
Files changed (1) hide show
  1. landmarkdiff/benchmark.py +14 -18
landmarkdiff/benchmark.py CHANGED
@@ -63,19 +63,17 @@ class InferenceBenchmark:
63
  if throughput_fps == 0.0 and latency_ms > 0:
64
  throughput_fps = 1000.0 / latency_ms * batch_size
65
 
66
- self.results.append(
67
- BenchmarkResult(
68
- config_name=config_name,
69
- latency_ms=latency_ms,
70
- throughput_fps=throughput_fps,
71
- vram_gb=vram_gb,
72
- batch_size=batch_size,
73
- resolution=resolution,
74
- num_inference_steps=num_inference_steps,
75
- device=device,
76
- metadata=metadata,
77
- )
78
- )
79
 
80
  def mean_latency(self, config_name: str | None = None) -> float:
81
  """Mean latency in ms, optionally filtered by config."""
@@ -89,8 +87,9 @@ class InferenceBenchmark:
89
  results = self._filter(config_name)
90
  if not results:
91
  return float("nan")
 
92
  sorted_latencies = sorted(r.latency_ms for r in results)
93
- idx = max(0, int(len(sorted_latencies) * 0.99) - 1)
94
  return sorted_latencies[idx]
95
 
96
  def mean_throughput(self, config_name: str | None = None) -> float:
@@ -126,10 +125,7 @@ class InferenceBenchmark:
126
  if not configs:
127
  return "No benchmark results."
128
 
129
- header = (
130
- f"{'Config':>20s} | {'Mean(ms)':>10s} | {'P99(ms)':>10s}"
131
- f" | {'FPS':>8s} | {'VRAM(GB)':>8s} | {'N':>4s}"
132
- )
133
  lines = [
134
  f"Inference Benchmark: {self.model_name}",
135
  header,
 
63
  if throughput_fps == 0.0 and latency_ms > 0:
64
  throughput_fps = 1000.0 / latency_ms * batch_size
65
 
66
+ self.results.append(BenchmarkResult(
67
+ config_name=config_name,
68
+ latency_ms=latency_ms,
69
+ throughput_fps=throughput_fps,
70
+ vram_gb=vram_gb,
71
+ batch_size=batch_size,
72
+ resolution=resolution,
73
+ num_inference_steps=num_inference_steps,
74
+ device=device,
75
+ metadata=metadata,
76
+ ))
 
 
77
 
78
  def mean_latency(self, config_name: str | None = None) -> float:
79
  """Mean latency in ms, optionally filtered by config."""
 
87
  results = self._filter(config_name)
88
  if not results:
89
  return float("nan")
90
+ import math
91
  sorted_latencies = sorted(r.latency_ms for r in results)
92
+ idx = min(len(sorted_latencies) - 1, math.ceil(len(sorted_latencies) * 0.99) - 1)
93
  return sorted_latencies[idx]
94
 
95
  def mean_throughput(self, config_name: str | None = None) -> float:
 
125
  if not configs:
126
  return "No benchmark results."
127
 
128
+ header = f"{'Config':>20s} | {'Mean(ms)':>10s} | {'P99(ms)':>10s} | {'FPS':>8s} | {'VRAM(GB)':>8s} | {'N':>4s}"
 
 
 
129
  lines = [
130
  f"Inference Benchmark: {self.model_name}",
131
  header,