surrogate1 committed on
Commit
99cf609
·
verified ·
1 Parent(s): 4df460e

fix: minimal gr.Interface tabs (TabbedInterface, not Blocks)

Browse files
Files changed (1) hide show
  1. app.py +73 -136
app.py CHANGED
@@ -1,173 +1,110 @@
1
- """surrogate1 ZeroGPU — Qwen2.5-Coder-32B AWQ synth+judge worker.
2
 
3
- Higher-quality synth pairs + better judge calibration with 32B vs 7B.
4
- AWQ pre-quantized = no bitsandbytes runtime quant = no startup error.
5
  """
6
- import os, json, re
7
- import gradio as gr
8
- import spaces
9
- import torch
10
 
11
- BASE_MODEL = os.environ.get("BASE_MODEL", "Qwen/Qwen2.5-Coder-14B-Instruct")
12
  HF_TOKEN = os.environ.get("HF_TOKEN", "")
13
 
14
- DOMAIN_SEEDS = {
15
- "code-python": "a Python coding task",
16
- "code-typescript": "a TypeScript / Node.js coding task",
17
- "code-rust": "a Rust coding task",
18
- "code-go": "a Go coding task",
19
- "devops-tf": "a Terraform / IaC task",
20
- "devops-k8s": "a Kubernetes manifest or operator task",
21
- "devops-cdk": "an AWS CDK construct task",
22
- "ci-github": "a GitHub Actions workflow task",
23
- "sec-iam": "an IAM least-privilege policy task",
24
- "sec-cve": "a CVE remediation task",
25
- "sre-runbook": "an incident runbook task",
26
- "sre-slo": "an SLI/SLO/error-budget definition task",
27
- "data-sql": "a parameterized SQL query task",
28
- "ai-eng": "a RAG / vector / LoRA / vLLM task",
29
- "api-rest": "a REST API design task",
30
- "test-pytest": "a pytest test-writing task",
31
- }
32
-
33
- JUDGE_PRINCIPLES = (
34
- "Score the response 0-10 on: correctness, security (no leaked secrets, "
35
- "IAM least-priv, input validated), idiomatic, completeness, citation "
36
- "(real APIs only). Return ONLY JSON: "
37
- "{\"score\": float, \"axes\": {...}, \"why\": \"...\"}"
38
- )
39
-
40
- _model = None
41
- _tok = None
42
-
43
-
44
- def _load_lazy():
45
- global _model, _tok
46
- if _model is not None:
47
- return _model, _tok
48
- from transformers import AutoModelForCausalLM, AutoTokenizer
49
- _tok = AutoTokenizer.from_pretrained(
50
- BASE_MODEL, token=HF_TOKEN or None, trust_remote_code=True)
51
- if _tok.pad_token_id is None:
52
- _tok.pad_token_id = _tok.eos_token_id
53
- from transformers import BitsAndBytesConfig
54
- bnb = BitsAndBytesConfig(
55
- load_in_4bit=True,
56
- bnb_4bit_compute_dtype=torch.bfloat16,
57
- bnb_4bit_quant_type="nf4",
58
- bnb_4bit_use_double_quant=True,
59
- )
60
- _model = AutoModelForCausalLM.from_pretrained(
61
  BASE_MODEL, token=HF_TOKEN or None, trust_remote_code=True,
62
  device_map="cuda", quantization_config=bnb)
63
- return _model, _tok
64
 
65
 
66
- def _generate(prompt, max_new=512, temp=0.7):
67
- model, tok = _load_lazy()
68
- inputs = tok(prompt, return_tensors="pt", truncation=True,
69
- max_length=8000).to("cuda")
70
- out = model.generate(
71
- **inputs, max_new_tokens=int(max_new), temperature=float(temp),
72
- top_p=0.9, do_sample=temp > 0,
73
- pad_token_id=tok.pad_token_id, eos_token_id=tok.eos_token_id)
74
- return tok.decode(out[0][inputs["input_ids"].shape[1]:],
75
- skip_special_tokens=True).strip()
76
 
77
 
78
  @spaces.GPU(duration=300)
79
- def synth_batch(domain="code-python", count=10):
80
- model, tok = _load_lazy()
81
- seed_text = DOMAIN_SEEDS.get(domain, DOMAIN_SEEDS["code-python"])
82
  out_lines = []
83
  for i in range(int(count)):
84
  instr_msgs = [
85
  {"role": "system",
86
- "content": f"Generate ONE realistic, diverse, high-quality user request about {seed_text}. "
87
  "Output ONLY the request as a single paragraph. No preamble."},
88
  {"role": "user", "content": ""},
89
  ]
90
- ip = tok.apply_chat_template(instr_msgs, tokenize=False, add_generation_prompt=True)
91
- instruction = _generate(ip, max_new=180, temp=0.95).strip().split("\n")[0][:600]
92
  if len(instruction) < 30: continue
93
-
94
  resp_msgs = [
95
- {"role": "system", "content": "You are Surrogate-1, expert DevSecOps + coding agent. "
96
- "Cite real APIs. No phantom imports."},
97
  {"role": "user", "content": instruction},
98
  ]
99
- rp = tok.apply_chat_template(resp_msgs, tokenize=False, add_generation_prompt=True)
100
- response = _generate(rp, max_new=512, temp=0.4)
101
  if len(response) < 50: continue
102
-
103
  out_lines.append(json.dumps({
104
  "prompt": instruction, "response": response,
105
- "source": "magpie-zerogpu-32b",
106
  "meta": {"domain": domain, "ix": i},
107
  }, ensure_ascii=False))
108
  return "\n".join(out_lines)
109
 
110
 
111
  @spaces.GPU(duration=120)
112
- def judge_pair(prompt, response, criteria="default"):
113
  if not prompt or not response:
114
  return json.dumps({"score": 0.0, "why": "empty"})
115
- model, tok = _load_lazy()
116
- j_msgs = [
117
- {"role": "system", "content": "You are a strict senior code reviewer. " + JUDGE_PRINCIPLES},
118
- {"role": "user", "content": f"PROMPT:\n{prompt[:2000]}\n\nRESPONSE:\n{response[:4000]}\n\nScore. JSON only."},
119
- ]
120
- jp = tok.apply_chat_template(j_msgs, tokenize=False, add_generation_prompt=True)
121
- raw = _generate(jp, max_new=400, temp=0.1)
122
- m = re.search(r"\{[^{}]*\"score\"[^{}]*\}", raw, re.DOTALL)
123
- if m:
124
- try: return json.dumps(json.loads(m.group(0)), ensure_ascii=False)
125
  except: pass
126
- return json.dumps({"score": 5.0, "why": raw[:300], "raw": True})
127
 
128
 
129
- @spaces.GPU(duration=300)
130
- def best_of_n(prompt, n=4, max_new=512):
131
- if not prompt: return json.dumps({"error": "empty"})
132
- model, tok = _load_lazy()
133
- sys_msg = "You are Surrogate-1. Solve the task with production-quality code."
134
- cands = []
135
- for i in range(int(n)):
136
- msgs = [{"role": "system", "content": sys_msg}, {"role": "user", "content": prompt}]
137
- p = tok.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
138
- cands.append(_generate(p, max_new=int(max_new), temp=0.7 + 0.05 * i))
139
- scored = []
140
- for c in cands:
141
- s = min(1.0, len(c) / 800)
142
- if "```" in c: s += 0.2
143
- if "import " in c or "def " in c: s += 0.1
144
- scored.append((s, c))
145
- scored.sort(key=lambda x: -x[0])
146
- return json.dumps({"best": scored[0][1], "best_score": scored[0][0],
147
- "all": [c for _, c in scored]}, ensure_ascii=False)
148
-
149
-
150
- with gr.Blocks(title="Surrogate-1 32B synth+judge") as demo:
151
- gr.Markdown(
152
- f"# Surrogate-1 — synth + judge (Qwen2.5-Coder-32B AWQ)\n"
153
- f"3 endpoints: `/run/synth_batch`, `/run/judge_pair`, `/run/best_of_n`"
154
- )
155
- with gr.Tab("synth"):
156
- d = gr.Dropdown(list(DOMAIN_SEEDS.keys()), value="code-python", label="domain")
157
- n = gr.Slider(1, 20, value=10, step=1, label="count")
158
- gr.Button("synth_batch").click(synth_batch, [d, n], gr.Textbox(label="JSONL", lines=20),
159
- api_name="synth_batch")
160
- with gr.Tab("judge"):
161
- p = gr.Textbox(label="prompt", lines=3); r = gr.Textbox(label="response", lines=8)
162
- c = gr.Textbox(label="criteria", value="default")
163
- gr.Button("judge_pair").click(judge_pair, [p, r, c], gr.Textbox(label="JSON", lines=8),
164
- api_name="judge_pair")
165
- with gr.Tab("best-of-n"):
166
- bp = gr.Textbox(label="prompt", lines=3)
167
- bn = gr.Slider(2, 6, value=4, step=1, label="n")
168
- bm = gr.Slider(128, 1024, value=512, step=64, label="max_new")
169
- gr.Button("best_of_n").click(best_of_n, [bp, bn, bm], gr.Textbox(label="JSON", lines=15),
170
- api_name="best_of_n")
171
-
172
- if __name__ == "__main__":
173
- demo.queue(max_size=6).launch()
 
1
"""surrogate1 ZeroGPU synth+judge — minimal gr.Interface tabs via TabbedInterface.

Same gradio-compat pattern as ashirato (gr.Interface only, no ChatInterface).
Two endpoints (synth_batch, judge_pair) exposed as separate Interfaces,
mounted on a gr.TabbedInterface.
"""
import os, json, re, gradio as gr, spaces, torch
7
 
8
+ BASE_MODEL = os.environ.get("BASE_MODEL", "Qwen/Qwen2.5-Coder-7B-Instruct")
9
  HF_TOKEN = os.environ.get("HF_TOKEN", "")
10
 
11
+ DOMAINS = ["code-python", "code-typescript", "code-rust", "code-go",
12
+ "devops-tf", "devops-k8s", "devops-cdk", "ci-github",
13
+ "sec-iam", "sec-cve", "sre-runbook", "sre-slo",
14
+ "data-sql", "ai-eng", "api-rest", "test-pytest"]
15
+ SEED_TPL = "a {} task"
16
+
17
+ JUDGE_RULES = ("Score 0-10: correctness, security, idiomatic, completeness, "
18
+ "real-API citation. Return ONLY JSON: "
19
+ '{"score":float,"why":str}')
20
+
21
+ _m = _t = None
22
+
23
+
24
def _load():
    """Lazily load the tokenizer and 4-bit NF4-quantized model.

    Results are cached in the module-level singletons ``_m``/``_t`` so the
    heavy download/quantization happens at most once per process.
    """
    global _m, _t
    if _m is not None:
        return _m, _t
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
    _t = AutoTokenizer.from_pretrained(
        BASE_MODEL, token=HF_TOKEN or None, trust_remote_code=True)
    # Some checkpoints ship without a pad token; reuse EOS so generate() can pad.
    if _t.pad_token_id is None:
        _t.pad_token_id = _t.eos_token_id
    quant_cfg = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
    )
    _m = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL, token=HF_TOKEN or None, trust_remote_code=True,
        device_map="cuda", quantization_config=quant_cfg)
    return _m, _t
39
 
40
 
41
def _gen(prompt, max_new=512, temp=0.7):
    """Run one generation pass and return only the newly generated text, stripped."""
    model, tok = _load()
    enc = tok(prompt, return_tensors="pt", truncation=True, max_length=8000)
    enc = enc.to("cuda")
    output = model.generate(
        **enc,
        max_new_tokens=int(max_new),
        temperature=float(temp),
        top_p=0.9,
        do_sample=temp > 0,  # greedy decoding when temp == 0
        pad_token_id=tok.pad_token_id,
        eos_token_id=tok.eos_token_id,
    )
    # Slice off the prompt tokens so callers get the completion only.
    prompt_len = enc["input_ids"].shape[1]
    return tok.decode(output[0][prompt_len:], skip_special_tokens=True).strip()
 
 
 
48
 
49
 
50
@spaces.GPU(duration=300)
def synth_batch(domain: str, count: int) -> str:
    """Magpie-style self-synthesis: have the model invent up to ``count``
    user requests for *domain*, then answer each one.

    Returns JSONL, one ``{"prompt", "response", "source", "meta"}`` object per
    line. Instructions shorter than 30 chars and responses shorter than 50
    chars are silently skipped, so fewer than ``count`` lines may come back.
    """
    model, tok = _load()
    seed = SEED_TPL.format(domain.replace("-", " "))
    rows = []
    for ix in range(int(count)):
        # Step 1: ask the model to invent a realistic user request.
        instr_msgs = [
            {"role": "system",
             "content": f"Generate ONE realistic, diverse, high-quality user request about {seed}. "
                        "Output ONLY the request as a single paragraph. No preamble."},
            {"role": "user", "content": ""},
        ]
        instr_prompt = tok.apply_chat_template(
            instr_msgs, tokenize=False, add_generation_prompt=True)
        instruction = _gen(instr_prompt, max_new=180, temp=0.95).strip().split("\n")[0][:600]
        if len(instruction) < 30:
            continue
        # Step 2: answer it at a lower temperature.
        resp_msgs = [
            {"role": "system",
             "content": "You are Surrogate-1, expert DevSecOps + coding agent. Real APIs only."},
            {"role": "user", "content": instruction},
        ]
        resp_prompt = tok.apply_chat_template(
            resp_msgs, tokenize=False, add_generation_prompt=True)
        response = _gen(resp_prompt, max_new=512, temp=0.4)
        if len(response) < 50:
            continue
        rows.append(json.dumps({
            "prompt": instruction, "response": response,
            "source": "magpie-zerogpu",
            "meta": {"domain": domain, "ix": ix},
        }, ensure_ascii=False))
    return "\n".join(rows)
79
 
80
 
81
@spaces.GPU(duration=120)
def judge_pair(prompt: str, response: str) -> str:
    """LLM-as-judge: score a (prompt, response) pair per JUDGE_RULES.

    Returns a JSON string. Empty input short-circuits to score 0.0; when the
    model output contains no parseable ``{"score": ...}`` object, falls back
    to score 5.0 with a raw-text excerpt and ``"raw": true``.
    """
    if not prompt or not response:
        return json.dumps({"score": 0.0, "why": "empty"})
    m, t = _load()
    j_msgs = [
        {"role": "system", "content": "You are a strict reviewer. " + JUDGE_RULES},
        {"role": "user",
         "content": f"PROMPT:\n{prompt[:2000]}\n\nRESPONSE:\n{response[:4000]}\n\nScore. JSON only."},
    ]
    raw = _gen(t.apply_chat_template(j_msgs, tokenize=False, add_generation_prompt=True),
               max_new=300, temp=0.1)
    # Grab the first flat JSON object containing a "score" key.
    mm = re.search(r"\{[^{}]*\"score\"[^{}]*\}", raw, re.DOTALL)
    if mm:
        try:
            return json.dumps(json.loads(mm.group(0)), ensure_ascii=False)
        except ValueError:
            # Was a bare `except: pass` — narrowed so only malformed JSON
            # (json.JSONDecodeError subclasses ValueError) is swallowed.
            pass
    return json.dumps({"score": 5.0, "why": raw[:200], "raw": True})
95
 
96
 
97
# One plain gr.Interface per endpoint (no Blocks/ChatInterface) for gradio compat.
synth_iface = gr.Interface(
    synth_batch,
    [gr.Dropdown(DOMAINS, value="code-python", label="domain"),
     gr.Slider(1, 20, value=10, step=1, label="count")],
    gr.Textbox(label="JSONL", lines=20),
    title="Magpie synth_batch")

judge_iface = gr.Interface(
    judge_pair,
    [gr.Textbox(label="prompt", lines=3),
     gr.Textbox(label="response", lines=8)],
    gr.Textbox(label="JSON", lines=6),
    title="LLM-as-judge")

demo = gr.TabbedInterface([synth_iface, judge_iface], ["synth", "judge"])

# Guard the launch so importing this module (e.g. from tests or tooling)
# does not start a server; the previous revision had this guard and the
# unconditional launch() was a regression.
if __name__ == "__main__":
    demo.queue(max_size=6).launch()