trohrbaugh committed on
Commit
bbd83fb
·
verified ·
1 Parent(s): 340004c

Add GGUF detection with friendly error; clarify scope in UI

Browse files
Files changed (2) hide show
  1. app.py +5 -2
  2. scan.py +16 -0
app.py CHANGED
@@ -208,13 +208,16 @@ with gr.Blocks(
208
  # 🧬 ModelDNA
209
  ### The DNA test for AI models — verify provenance before you download
210
  *Powered by ModelAtlas · a RadicalNotion product*
 
 
 
211
  ---
212
  """)
213
 
214
  with gr.Row():
215
  model_input = gr.Textbox(
216
- label="HuggingFace Model ID",
217
- placeholder="e.g. Qwen/Qwen3.5-27B or paste a HF URL",
218
  scale=4,
219
  )
220
  scan_btn = gr.Button("🔬 Scan", variant="primary", scale=1)
 
208
  # 🧬 ModelDNA
209
  ### The DNA test for AI models — verify provenance before you download
210
  *Powered by ModelAtlas · a RadicalNotion product*
211
+
212
+ > **Works with:** standard HuggingFace checkpoints (safetensors / PyTorch bin).
213
+ > **Not yet supported:** GGUF quantized models, private/gated models. No weight download needed — Stage 1 reads config.json only.
214
  ---
215
  """)
216
 
217
  with gr.Row():
218
  model_input = gr.Textbox(
219
+ label="HuggingFace Model ID or URL",
220
+ placeholder="e.g. Qwen/Qwen3.5-27B (not GGUF — use the original checkpoint)",
221
  scale=4,
222
  )
223
  scan_btn = gr.Button("🔬 Scan", variant="primary", scale=1)
scan.py CHANGED
@@ -315,6 +315,22 @@ def generate_verdict(
315
  def scan(model_id: str) -> dict:
316
  """Full Stage 1 scan. Entry point."""
317
  t0 = time.time()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
318
  config = fetch_config(model_id)
319
  if not config:
320
  return {
 
315
  def scan(model_id: str) -> dict:
316
  """Full Stage 1 scan. Entry point."""
317
  t0 = time.time()
318
+
319
+ # Detect unsupported formats before attempting config fetch
320
+ name_lower = model_id.lower()
321
+ if "gguf" in name_lower:
322
+ return {
323
+ "model_id": model_id,
324
+ "error": (
325
+ "GGUF models pack weights into a single file and don't have a standard config.json. "
326
+ "Stage 1 scanning works with standard HuggingFace checkpoints (safetensors/PyTorch). "
327
+ "Try the original (non-quantized) model instead — e.g. the unsloth/Qwen3.6-35B-A3B "
328
+ "base would be Qwen/Qwen2.5-... or the upstream source. "
329
+ "GGUF support is on the roadmap."
330
+ ),
331
+ "scanned_at": datetime.now(timezone.utc).isoformat(),
332
+ }
333
+
334
  config = fetch_config(model_id)
335
  if not config:
336
  return {