File size: 26,941 Bytes
df4727f
a956c3b
df4727f
35d2c31
 
df4727f
 
775c082
35d2c31
a956c3b
df4727f
 
 
 
 
 
 
 
 
35d2c31
 
 
 
 
 
 
df4727f
 
35d2c31
 
 
 
df4727f
 
 
35d2c31
 
df4727f
 
35d2c31
 
 
 
 
 
 
 
c7bbe0b
35d2c31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e9ba07a
 
 
 
35d2c31
e9ba07a
 
 
c7bbe0b
 
35d2c31
c7bbe0b
 
a956c3b
c7bbe0b
35d2c31
c7bbe0b
a956c3b
c7bbe0b
a956c3b
c7bbe0b
 
35d2c31
a956c3b
df4727f
a956c3b
c7bbe0b
df4727f
c7bbe0b
35d2c31
 
 
 
df4727f
 
35d2c31
a956c3b
c7bbe0b
a956c3b
df4727f
a956c3b
35d2c31
df4727f
70dedbc
df4727f
a956c3b
df4727f
 
35d2c31
 
 
 
a956c3b
35d2c31
 
 
 
 
 
 
 
 
 
 
 
df4727f
35d2c31
df4727f
35d2c31
 
 
 
 
 
 
df4727f
 
 
35d2c31
df4727f
35d2c31
775c082
 
 
 
 
35d2c31
775c082
35d2c31
 
 
 
 
 
 
df4727f
35d2c31
df4727f
 
a956c3b
 
df4727f
35d2c31
df4727f
35d2c31
 
df4727f
 
35d2c31
df4727f
35d2c31
 
df4727f
70dedbc
df4727f
35d2c31
a956c3b
35d2c31
 
df4727f
a956c3b
35d2c31
 
 
 
70dedbc
a956c3b
35d2c31
 
 
 
a956c3b
df4727f
35d2c31
 
 
 
 
 
a956c3b
35d2c31
 
a956c3b
 
 
35d2c31
a956c3b
35d2c31
a956c3b
 
 
35d2c31
70dedbc
a956c3b
 
 
 
 
 
 
 
 
35d2c31
a956c3b
35d2c31
a956c3b
 
 
35d2c31
775c082
 
a956c3b
775c082
 
a956c3b
 
 
35d2c31
df4727f
a956c3b
 
70dedbc
a956c3b
70dedbc
 
a956c3b
 
 
70dedbc
 
 
 
 
 
 
a956c3b
 
70dedbc
a956c3b
70dedbc
 
a956c3b
70dedbc
a956c3b
70dedbc
a956c3b
70dedbc
a956c3b
70dedbc
 
 
a956c3b
 
70dedbc
 
a956c3b
df4727f
 
 
 
 
 
e25566e
70dedbc
 
e9ba07a
775c082
70dedbc
35d2c31
a956c3b
35d2c31
 
 
 
 
 
 
775c082
 
35d2c31
a956c3b
35d2c31
 
a956c3b
35d2c31
 
 
 
70dedbc
35d2c31
 
775c082
 
35d2c31
775c082
35d2c31
 
a956c3b
35d2c31
 
 
 
a956c3b
 
 
 
 
35d2c31
a956c3b
 
 
35d2c31
 
 
775c082
 
35d2c31
775c082
35d2c31
 
a956c3b
 
35d2c31
 
 
a956c3b
 
35d2c31
 
a956c3b
 
35d2c31
a956c3b
775c082
35d2c31
 
 
 
a956c3b
e9ba07a
 
 
a956c3b
e9ba07a
a956c3b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70dedbc
a956c3b
 
70dedbc
 
 
 
 
df4727f
 
e25566e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
"""
Garment Image -> 2D Sewing Pattern + Chat Editing + 3D Preview + Agentic Refinement
"""
import json, os, re, traceback, copy
from typing import Dict, Optional, Tuple, List
import gradio as gr
from PIL import Image
from pattern_generator import generate_pattern_from_analysis, get_pattern_pieces
from garment_3d import create_3d_figure
from refinement_loop import refinement_loop, render_3d_to_image

# Prompt sent to the VLM together with the uploaded garment photo.
# Asks for a strict-JSON analysis (type, description, cm measurements, features)
# that _call_vlm() parses with json.loads after stripping any code fences.
GARMENT_ANALYSIS_PROMPT = """You are a professional fashion pattern maker. Analyze this garment image and extract precise sewing pattern parameters.

Return ONLY a JSON object (no markdown, no explanation) with this exact structure:

{
  "garment_type": "<one of: shirt, blouse, top, t-shirt, dress, skirt, pants, trousers, jeans, jacket, coat, blazer, hoodie, vest>",
  "description": "<brief description of the garment style, fit, and key features>",
  "measurements": {
    "bust": <number 75-130>, "waist": <number 55-110>, "hip": <number 80-130>,
    "shoulder_width": <number 35-55>, "bodice_length": <number 35-75>,
    "sleeve_length": <number 15-75>, "skirt_length": <number 30-120>,
    "pant_length": <number 30-110>, "neckline_depth": <number 3-25>,
    "neckline_width": <number 5-15>, "bicep": <number 25-45>,
    "wrist": <number 15-25>, "cap_height": <number 8-18>,
    "collar_height": <number 3-10>, "flare": <number 0-15>
  },
  "features": {
    "has_collar": <true/false>, "collar_type": "<standard/mandarin/peter_pan/none>",
    "has_cuffs": <true/false>, "has_pockets": <true/false>,
    "pocket_type": "<patch/welt/none>", "has_hood": <true/false>,
    "fit": "<fitted/regular/oversized/loose>"
  }
}

Be precise. Estimate realistic measurements in cm for an average adult.
Only include measurements relevant to the garment type.
"""

# Template used by chat_edit(): filled via str.format with the current pattern
# JSON and the user's edit request. Literal braces in the JSON skeleton are
# doubled ({{ }}) so .format() leaves them intact.
EDIT_PROMPT_TEMPLATE = """You are a fashion pattern editing assistant. The user wants to edit their garment pattern.

Current pattern parameters:
{current_json}

User request: {user_message}

Apply the edit and return ONLY the complete updated JSON (no markdown, no explanation) with the same structure. Keep all unchanged values the same. Only modify what the user asked to change.

{{
  "garment_type": "<type>",
  "description": "<updated description>",
  "measurements": {{
    "bust": <number>, "waist": <number>, "hip": <number>,
    "shoulder_width": <number>, "bodice_length": <number>,
    "sleeve_length": <number>, "skirt_length": <number>,
    "pant_length": <number>, "neckline_depth": <number>,
    "neckline_width": <number>, "bicep": <number>, "wrist": <number>,
    "cap_height": <number>, "collar_height": <number>, "flare": <number>
  }},
  "features": {{
    "has_collar": <bool>, "collar_type": "<type>",
    "has_cuffs": <bool>, "has_pockets": <bool>,
    "pocket_type": "<type>", "has_hood": <bool>,
    "fit": "<type>"
  }}
}}"""

# Ordered fallback list of (model_id, inference-provider, display_name) tried
# by _call_vlm() — first model that returns parseable JSON wins.
# Verified working VLMs (tested 2026-04-25):
#   Llama-4-Scout: confirmed image support, answers in content field
#   Kimi-K2.6: image support, answers in reasoning field
#   Qwen3.5-9B: image support unclear, answers in reasoning field
VISION_MODELS = [
    ("meta-llama/Llama-4-Scout-17B-16E-Instruct", "nscale", "Llama-4-Scout 17B"),
    ("moonshotai/Kimi-K2.6", "together", "Kimi K2.6"),
    ("Qwen/Qwen3.5-9B", "together", "Qwen 3.5 9B"),
]

def _extract_response_text(message):
    content = message.get('content', '') or ''
    reasoning = message.get('reasoning', '') or ''
    return content.strip() or reasoning.strip() or ''

def _extract_json_from_text(text):
    json_match = re.search(r'```(?:json)?\s*([\s\S]*?)\s*```', text)
    if json_match: return json_match.group(1)
    json_match = re.search(r'\{[\s\S]*\}', text)
    if json_match: return json_match.group()
    return None

def _call_vlm(messages, timeout=180):
    """Query the VISION_MODELS in order; return the first parsed analysis dict.

    Requires HF_TOKEN in the environment (returns None otherwise). Each
    model is called through its HF Inference Provider router endpoint; the
    first response whose text yields valid JSON is returned with a
    '_model_used' key added. Any per-model error (HTTP, timeout, bad JSON)
    is logged and the next model is tried. Returns None if all fail.
    """
    import requests
    token = os.environ.get("HF_TOKEN", "")
    if not token:
        return None
    for model_id, provider, display_name in VISION_MODELS:
        try:
            print(f"[VLM] Trying {display_name} via {provider}...")
            response = requests.post(
                f"https://router.huggingface.co/{provider}/v1/chat/completions",
                headers={"Authorization": f"Bearer {token}", "Content-Type": "application/json"},
                json={"model": model_id, "messages": messages, "max_tokens": 2000, "temperature": 0.1},
                timeout=timeout,
            )
            if response.status_code != 200:
                print(f"[VLM] {display_name}: HTTP {response.status_code} - {response.text[:200]}")
                continue
            text = _extract_response_text(response.json()['choices'][0]['message'])
            if not text:
                continue
            json_str = _extract_json_from_text(text)
            if not json_str:
                continue
            # A json.loads failure here is caught below and treated as a model failure.
            analysis = json.loads(json_str)
            analysis['_model_used'] = display_name
            print(f"[VLM] OK: {display_name} detected {analysis.get('garment_type','?')}")
            return analysis
        except Exception as e:
            print(f"[VLM] {display_name} failed: {e}")
    return None

def analyze_with_vlm(image):
    """Analyze a garment photo (PIL image) with a VLM.

    Downscales the image so its longest side is at most 1024 px, encodes it
    as base64 JPEG, and sends it together with GARMENT_ANALYSIS_PROMPT.
    Returns the parsed analysis dict, or None when HF_TOKEN is unset or the
    call fails.
    """
    import base64
    from io import BytesIO
    if not os.environ.get("HF_TOKEN", ""):
        return None
    # Keep the base64 payload small: cap the longest side at 1024 px.
    max_dim = 1024
    longest = max(image.size)
    if longest > max_dim:
        scale = max_dim / longest
        new_size = (int(image.size[0] * scale), int(image.size[1] * scale))
        image = image.resize(new_size, Image.LANCZOS)
    buf = BytesIO()
    image.convert('RGB').save(buf, format='JPEG', quality=85)
    encoded = base64.b64encode(buf.getvalue()).decode('utf-8')
    return _call_vlm([{
        "role": "user",
        "content": [
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded}"}},
            {"type": "text", "text": GARMENT_ANALYSIS_PROMPT},
        ],
    }])

def get_default_analysis(garment_type="shirt"):
    """Return a hand-tuned fallback analysis dict for *garment_type*.

    Used whenever the VLM is unavailable (no HF_TOKEN) or fails. All
    measurements are in centimetres. Unknown types fall back to the
    "shirt" preset. Note the returned dict is a shared literal — callers
    that mutate it (process_image, chat_edit) deep-copy it first.
    """
    defaults = {
        "shirt": {"garment_type":"shirt","description":"Standard button-up shirt","measurements":{"bust":96,"waist":80,"shoulder_width":44,"bodice_length":72,"sleeve_length":62,"neckline_depth":8,"neckline_width":7,"bicep":32,"wrist":18,"cap_height":14,"collar_height":4,"flare":0},"features":{"has_collar":True,"collar_type":"standard","has_cuffs":True,"has_pockets":True,"pocket_type":"patch","has_hood":False,"fit":"regular"}},
        "dress": {"garment_type":"dress","description":"A-line dress","measurements":{"bust":90,"waist":72,"hip":96,"shoulder_width":40,"bodice_length":42,"sleeve_length":25,"skirt_length":55,"neckline_depth":12,"neckline_width":8,"bicep":28,"wrist":17,"cap_height":12,"flare":8},"features":{"has_collar":False,"collar_type":"none","has_cuffs":False,"has_pockets":False,"pocket_type":"none","has_hood":False,"fit":"fitted"}},
        "pants": {"garment_type":"pants","description":"Straight-leg trousers","measurements":{"waist":78,"hip":98,"thigh":56,"knee":40,"ankle":26,"pant_length":100,"crotch_depth":27,"waistband_height":4,"flare":0},"features":{"has_pockets":True,"pocket_type":"welt","has_collar":False,"has_hood":False,"fit":"regular"}},
        "skirt": {"garment_type":"skirt","description":"A-line knee skirt","measurements":{"waist":72,"hip":96,"skirt_length":55,"waistband_height":4,"flare":6},"features":{"has_pockets":False,"has_collar":False,"has_hood":False,"fit":"regular"}},
        "jacket": {"garment_type":"jacket","description":"Tailored blazer","measurements":{"bust":100,"waist":86,"shoulder_width":46,"jacket_length":70,"sleeve_length":62,"neckline_depth":15,"neckline_width":9,"bicep":34,"wrist":20,"cap_height":15,"collar_height":6,"flare":0},"features":{"has_collar":True,"collar_type":"standard","has_cuffs":False,"has_pockets":True,"pocket_type":"welt","has_hood":False,"fit":"regular"}},
        "hoodie": {"garment_type":"hoodie","description":"Pullover hoodie","measurements":{"bust":108,"waist":100,"shoulder_width":50,"jacket_length":68,"sleeve_length":65,"neckline_depth":10,"neckline_width":8,"bicep":36,"wrist":22,"cap_height":13,"head_circumference":57,"flare":0},"features":{"has_collar":False,"collar_type":"none","has_cuffs":True,"has_pockets":True,"pocket_type":"patch","has_hood":True,"fit":"oversized"}},
        "vest": {"garment_type":"vest","description":"Classic vest","measurements":{"bust":96,"waist":80,"shoulder_width":42,"vest_length":55,"neckline_depth":18,"neckline_width":8,"flare":0},"features":{"has_collar":False,"has_cuffs":False,"has_pockets":False,"has_hood":False,"fit":"fitted"}},
    }
    return defaults.get(garment_type, defaults["shirt"])

# Module-level holder for the most recent analysis, shared across all Gradio
# callbacks (a dict so the value can be swapped without `global`).
_current_analysis = {"data": None}

def _generate_all_outputs(analysis):
    """Render every artifact for an analysis dict.

    Returns a 4-tuple: (2D pattern image, 3D plotly figure, markdown
    summary, pretty-printed JSON string). The internal '_model_used' key is
    stripped from the JSON shown to the user but surfaced in the summary.
    """
    gtype = analysis.get('garment_type', 'shirt')
    # Pattern generators take a flat dict of measurements + feature flags.
    params = {**analysis.get('measurements', {}), **analysis.get('features', {})}
    pieces = get_pattern_pieces(gtype, params)
    pattern_image, summary = generate_pattern_from_analysis(analysis)
    fig_3d = create_3d_figure(analysis, pattern_pieces=pieces)
    display = {k: v for k, v in analysis.items() if k != '_model_used'}
    model_used = analysis.get('_model_used')
    model_info = f"\n\n*AI: {model_used}*" if model_used else ""
    desc = analysis.get('description', 'No description')
    full_summary = f"**Garment:** {desc}\n\n{summary}{model_info}"
    return pattern_image, fig_3d, full_summary, json.dumps(display, indent=2)

def process_image(image, garment_type_override="Auto-detect"):
    """Analyze an uploaded garment image and build all pattern outputs.

    Tries the VLM first; falls back to a type-specific default analysis
    when the VLM is unavailable or fails. An explicit type override always
    wins over the detected type.

    Returns exactly the four values wired to this handler's Gradio outputs:
    (2D pattern image, 3D figure, summary markdown, JSON string).
    Previously a stray fifth value ([]) was returned, which did not match
    the four registered output components.
    """
    if image is None and garment_type_override == "Auto-detect":
        return None, None, "Please upload a garment image or select a type.", "{}"
    analysis = None
    if image is not None:
        try:
            analysis = analyze_with_vlm(image)
        except Exception as e:
            print(f"VLM failed: {e}")
    if analysis is None:
        gt = garment_type_override.lower() if garment_type_override != "Auto-detect" else "shirt"
        analysis = get_default_analysis(gt)
        if image is not None and garment_type_override == "Auto-detect":
            # Tell the user why AI detection did not run.
            analysis['_model_used'] = 'Default (set HF_TOKEN for AI)'
    if garment_type_override != "Auto-detect":
        analysis['garment_type'] = garment_type_override.lower()
    # Deep-copy: the defaults are shared literals and must not be mutated.
    _current_analysis["data"] = copy.deepcopy(analysis)
    try:
        return _generate_all_outputs(analysis)
    except Exception as e:
        traceback.print_exc()
        return None, None, f"Error: {e}", "{}"

def process_text(description):
    """Build pattern outputs from a free-text garment description.

    Tries the VLM when HF_TOKEN is set; otherwise picks the first default
    preset whose type keyword appears in the description (shirt if none
    match), substituting the user's text as the description.

    Returns exactly the four values wired to this handler's Gradio outputs:
    (2D pattern image, 3D figure, summary markdown, JSON string). A stray
    fifth value ([]) was previously returned against four registered outputs.
    """
    if not description.strip():
        return None, None, "Enter a description.", "{}"
    analysis = None
    hf_token = os.environ.get("HF_TOKEN", "")
    if hf_token:
        messages = [{"role": "user", "content": f"Based on this garment description, extract sewing pattern parameters.\n\nDescription: {description}\n\nReturn ONLY JSON with: garment_type, description, measurements (bust, waist, hip, shoulder_width, bodice_length, sleeve_length, skirt_length, pant_length, neckline_depth, neckline_width, bicep, wrist, cap_height, collar_height, flare), features (has_collar, collar_type, has_cuffs, has_pockets, pocket_type, has_hood, fit)."}]
        analysis = _call_vlm(messages, timeout=90)
    if analysis is None:
        # Keyword fallback: first matching garment keyword wins.
        desc_lower = description.lower()
        for gt in ['hoodie','jacket','coat','blazer','dress','skirt','pants','trousers','jeans','vest','shirt','blouse','top']:
            if gt in desc_lower:
                analysis = get_default_analysis(gt)
                analysis['description'] = description
                break
        if analysis is None:
            analysis = get_default_analysis("shirt")
            analysis['description'] = description
    _current_analysis["data"] = copy.deepcopy(analysis)
    try:
        return _generate_all_outputs(analysis)
    except Exception as e:
        return None, None, f"Error: {e}", "{}"

def process_manual(gt,bust,waist,hip,shoulder,bodice,sleeve,skirt,pant,neck,flare_c,collar,ctype,cuffs,pockets,hood,fit):
    """Build pattern outputs from the manual measurement sliders/checkboxes.

    Assembles an analysis dict from the raw widget values (fixed defaults
    for the secondary measurements: neckline_width, bicep, wrist,
    cap_height, collar_height) and renders the pattern.

    Returns exactly the four values wired to this handler's Gradio outputs:
    (2D pattern image, 3D figure, summary markdown, JSON string). A stray
    fifth value ([]) was previously returned against four registered outputs.
    """
    analysis = {"garment_type":gt.lower(),"description":f"Custom {gt.lower()}","measurements":{"bust":bust,"waist":waist,"hip":hip,"shoulder_width":shoulder,"bodice_length":bodice,"sleeve_length":sleeve,"skirt_length":skirt,"pant_length":pant,"neckline_depth":neck,"neckline_width":7,"bicep":30,"wrist":18,"cap_height":14,"collar_height":5,"flare":flare_c},"features":{"has_collar":collar,"collar_type":ctype.lower(),"has_cuffs":cuffs,"has_pockets":pockets,"pocket_type":"patch","has_hood":hood,"fit":fit.lower()}}
    _current_analysis["data"] = copy.deepcopy(analysis)
    try:
        return _generate_all_outputs(analysis)
    except Exception as e:
        return None, None, f"Error: {e}", "{}"

def chat_edit(message, history):
    """Apply a natural-language edit to the current pattern.

    Tries the VLM with EDIT_PROMPT_TEMPLATE first; falls back to simple
    keyword rules when the VLM is unavailable. The chat reply lists every
    measurement/feature that changed.

    Fixes vs. previous version:
    - bare ``except:`` narrowed to ``except Exception`` so Ctrl-C /
      SystemExit are not swallowed;
    - the change log now separates old and new values with " → "
      (previously they were concatenated, e.g. "bust: 96106").

    Returns (chat history, 2D pattern, 3D figure, summary, JSON string).
    """
    if not message.strip():
        return history, None, None, "Please enter an edit request.", "{}"
    current = _current_analysis.get("data") or get_default_analysis("shirt")
    _current_analysis["data"] = current
    current_clean = {k: v for k, v in current.items() if k != '_model_used'}
    edit_prompt = EDIT_PROMPT_TEMPLATE.format(current_json=json.dumps(current_clean, indent=2), user_message=message)
    updated = None
    if os.environ.get("HF_TOKEN", ""):
        try:
            updated = _call_vlm([{"role": "user", "content": edit_prompt}], timeout=90)
        except Exception:
            pass  # best-effort: fall through to the rule-based edits below
    if updated is None:
        # Rule-based fallback: apply a few common edits by keyword.
        updated = copy.deepcopy(current)
        msg_lower = message.lower()
        if "long sleeve" in msg_lower:
            updated['measurements']['sleeve_length'] = 65
        elif "short sleeve" in msg_lower:
            updated['measurements']['sleeve_length'] = 25
        if "no collar" in msg_lower:
            updated['features']['has_collar'] = False
            updated['features']['collar_type'] = 'none'
        if "add collar" in msg_lower:
            updated['features']['has_collar'] = True
            updated['features']['collar_type'] = 'standard'
        if "add hood" in msg_lower:
            updated['features']['has_hood'] = True
        if "no hood" in msg_lower:
            updated['features']['has_hood'] = False
        if "oversized" in msg_lower:
            updated['features']['fit'] = 'oversized'
            updated['measurements']['bust'] = updated['measurements'].get('bust', 96) + 10
        if "fitted" in msg_lower:
            updated['features']['fit'] = 'fitted'
        if "flare" in msg_lower:
            updated['measurements']['flare'] = max(updated['measurements'].get('flare', 0), 8)
        updated['_model_used'] = 'Rule-based edit'
    if 'garment_type' not in updated:
        updated['garment_type'] = current.get('garment_type', 'shirt')
    _current_analysis["data"] = copy.deepcopy(updated)
    try:
        p2d, p3d, summary, j = _generate_all_outputs(updated)
    except Exception as e:
        p2d, p3d, summary, j = None, None, f"Error: {e}", "{}"
    # Summarise what actually changed for the chat transcript.
    ai_msg = f"Applied: {message}\n"
    changes = []
    for k in set(list(current.get('measurements',{}).keys()) + list(updated.get('measurements',{}).keys())):
        ov, nv = current.get('measurements',{}).get(k), updated.get('measurements',{}).get(k)
        if ov != nv and ov is not None and nv is not None:
            changes.append(f"  {k}: {ov} → {nv}")
    for k in set(list(current.get('features',{}).keys()) + list(updated.get('features',{}).keys())):
        ov, nv = current.get('features',{}).get(k), updated.get('features',{}).get(k)
        if ov != nv:
            changes.append(f"  {k}: {ov} → {nv}")
    ai_msg += ("\n".join(changes)) if changes else "No changes."
    history = history or []
    history.append((message, ai_msg))
    return history, p2d, p3d, summary, j

def run_refinement(image, garment_type_override, max_iters):
    """Run the agentic refinement loop on an uploaded garment image.

    Generator (Gradio streaming handler) that yields a single final
    6-tuple: (best 2D pattern, best 3D figure, last 3D→2D projection,
    markdown log, best-params JSON, summary). Seeds the loop with a VLM
    analysis (or a default preset), then delegates to refinement_loop().
    """
    if image is None:
        yield None, None, None, "Please upload a garment image.", "{}", None; return
    analysis = None
    try: analysis = analyze_with_vlm(image)
    except Exception as e: print(f"VLM failed: {e}")
    if analysis is None:
        # No VLM result: start from the preset for the chosen (or default) type.
        gt = garment_type_override.lower() if garment_type_override != "Auto-detect" else "shirt"
        analysis = get_default_analysis(gt)
    if garment_type_override != "Auto-detect": analysis['garment_type'] = garment_type_override.lower()
    # refinement_loop re-renders all outputs each iteration via this callback.
    def gen_fn(a): return _generate_all_outputs(a)
    result = refinement_loop(original_image=image, initial_analysis=analysis, generate_fn=gen_fn,
                             max_iterations=int(max_iters), target_composite=0.82, plateau_patience=3, lr=0.7)
    # Build the markdown refinement log from the loop's result dict.
    log_lines = [f"## Refinement Results\n", f"**Converged:** {'✅ Yes' if result['converged'] else '❌ No'}",
                 f"**Iterations:** {result['total_iterations']}", f"**Best Score:** {result['best_score']:.4f}"]
    if result['scores']: log_lines.append(f"**Scores:** {' → '.join(f'{s:.3f}' for s in result['scores'])}")
    log_lines.append("")
    for step in result['history']:
        it, status, metrics = step['iteration'], step.get('status','?'), step.get('metrics',{})
        # NOTE(review): no separator between iteration number and status —
        # renders as e.g. "Iteration 1ok"; possibly a lost "—". Confirm intent.
        log_lines.append(f"### Iteration {it}{status}")
        if metrics: log_lines.append(f"SSIM={metrics.get('ssim',0):.3f} | Edge={metrics.get('edge_ssim',0):.3f} | Composite={metrics.get('composite',0):.3f}")
        if step.get('new_best'): log_lines.append("⭐ **New best!**")
        diffs = step.get('vlm_differences', [])
        if diffs: log_lines.append("**Differences:** " + "; ".join(diffs[:3]))
        adj = step.get('adjustments', {})
        if adj: log_lines.append("**Adjustments:** " + ", ".join(f"{k}={v}" for k, v in adj.items()))
        reason = step.get('reason', '')
        if reason: log_lines.append(f"*{reason}*")
        log_lines.append("")
    # Publish the best analysis so the Chat & Edit tab can refine it further.
    best = result['best_analysis']; _current_analysis["data"] = copy.deepcopy(best)
    try: p2d, p3d, summary, j = _generate_all_outputs(best)
    except: p2d, p3d, summary, j = None, None, "Error", "{}"
    # Show the most recent 3D→2D projection image, if any iteration produced one.
    last_proj = None
    for step in reversed(result['history']):
        if 'projection' in step: last_proj = step['projection']; break
    yield p2d, p3d, last_proj, "\n".join(log_lines), j, summary

# Custom stylesheet for the app (header, info banner, references box).
CSS = """
.main-header { text-align: center; margin-bottom: 20px; }
.info-box { padding: 15px; border-radius: 10px; background: #f0f7ff; border: 1px solid #cce0ff; margin: 10px 0; }
.ref-box { padding: 10px; border-radius: 8px; background: #f8f8f8; border: 1px solid #e0e0e0; font-size: 0.85em; }
"""

# Build the Gradio UI. Fix: css/theme are gr.Blocks() constructor arguments —
# Blocks.launch() does not accept them and raised TypeError at startup.
with gr.Blocks(title="Garment Pattern Studio", css=CSS, theme=gr.themes.Soft()) as demo:
    gr.HTML("""<div class="main-header"><h1>🧵 Garment Pattern Studio</h1>
        <p style="font-size:1.1em;color:#555;">Analyze garments, edit with chat, preview in 3D, refine with AI agent</p></div>
    <div class="info-box"><b>Powered by:</b> Llama-4-Scout · Kimi K2.6 · Qwen 3.5 via
        <a href="https://huggingface.co/docs/inference-providers">HF Inference Providers</a>
        &nbsp;|&nbsp; <b>3D view built from actual 2D pattern pieces</b></div>""")

    # --- Tab 1: analyze an uploaded photo ---
    with gr.Tab("📸 From Image"):
        with gr.Row():
            with gr.Column(scale=1):
                input_image = gr.Image(type="pil", label="Upload Garment Image", height=350)
                garment_override = gr.Dropdown(choices=["Auto-detect","Shirt","Dress","Skirt","Pants","Jacket","Hoodie","Vest"], value="Auto-detect", label="Garment Type Override")
                analyze_btn = gr.Button("Analyze & Generate", variant="primary", size="lg")
            with gr.Column(scale=2):
                with gr.Row():
                    with gr.Column(): out_pattern_2d = gr.Image(label="2D Sewing Pattern", height=400)
                    with gr.Column(): out_3d = gr.Plot(label="3D Garment Preview")
                out_summary = gr.Markdown(label="Pattern Summary")
                with gr.Accordion("Raw JSON", open=False): out_json = gr.Code(language="json")
        analyze_btn.click(process_image, inputs=[input_image, garment_override], outputs=[out_pattern_2d, out_3d, out_summary, out_json])

    # --- Tab 2: generate from a text description ---
    with gr.Tab("✍️ From Text"):
        with gr.Row():
            with gr.Column(scale=1):
                text_input = gr.Textbox(label="Describe the garment", placeholder="e.g., A fitted A-line dress with cap sleeves", lines=3)
                text_btn = gr.Button("Generate Pattern", variant="primary", size="lg")
                gr.Examples(examples=[["A classic dress shirt with long sleeves and button-down collar"],["A flared midi skirt with high waist"],["An oversized hoodie with kangaroo pocket"],["A fitted blazer with notched lapel collar"],["Slim-fit straight-leg jeans with pockets"],["A knee-length A-line dress with cap sleeves"]], inputs=text_input)
            with gr.Column(scale=2):
                with gr.Row():
                    with gr.Column(): txt_pattern_2d = gr.Image(label="2D Pattern", height=400)
                    with gr.Column(): txt_3d = gr.Plot(label="3D Preview")
                txt_summary = gr.Markdown()
                with gr.Accordion("Raw JSON", open=False): txt_json = gr.Code(language="json")
        text_btn.click(process_text, inputs=[text_input], outputs=[txt_pattern_2d, txt_3d, txt_summary, txt_json])

    # --- Tab 3: manual measurements ---
    with gr.Tab("📐 Manual"):
        with gr.Row():
            with gr.Column(scale=1):
                m_type = gr.Dropdown(choices=["Shirt","Dress","Skirt","Pants","Jacket","Hoodie","Vest"], value="Shirt", label="Garment Type")
                gr.Markdown("### Measurements (cm)")
                with gr.Row(): m_bust = gr.Slider(70,130,value=92,step=1,label="Bust"); m_waist = gr.Slider(55,110,value=74,step=1,label="Waist")
                with gr.Row(): m_hip = gr.Slider(75,130,value=96,step=1,label="Hip"); m_shoulder = gr.Slider(35,55,value=42,step=1,label="Shoulder")
                with gr.Row(): m_bodice = gr.Slider(30,80,value=42,step=1,label="Bodice Length"); m_sleeve = gr.Slider(10,75,value=60,step=1,label="Sleeve Length")
                with gr.Row(): m_skirt = gr.Slider(25,120,value=55,step=1,label="Skirt Length"); m_pant = gr.Slider(25,115,value=100,step=1,label="Pant Length")
                with gr.Row(): m_neck = gr.Slider(3,25,value=8,step=1,label="Neckline Depth"); m_flare = gr.Slider(0,20,value=0,step=1,label="Hem Flare")
                gr.Markdown("### Features")
                with gr.Row(): m_collar = gr.Checkbox(value=True,label="Collar"); m_ctype = gr.Dropdown(["Standard","Mandarin","Peter_pan"],value="Standard",label="Collar Type")
                with gr.Row(): m_cuffs = gr.Checkbox(value=True,label="Cuffs"); m_pockets = gr.Checkbox(value=False,label="Pockets")
                with gr.Row(): m_hood = gr.Checkbox(value=False,label="Hood"); m_fit = gr.Dropdown(["Fitted","Regular","Oversized","Loose"],value="Regular",label="Fit")
                manual_btn = gr.Button("Generate Pattern", variant="primary", size="lg")
            with gr.Column(scale=2):
                with gr.Row():
                    with gr.Column(): man_pattern_2d = gr.Image(label="2D Pattern", height=400)
                    with gr.Column(): man_3d = gr.Plot(label="3D Preview")
                man_summary = gr.Markdown()
                with gr.Accordion("Raw JSON", open=False): man_json = gr.Code(language="json")
        manual_btn.click(process_manual, inputs=[m_type,m_bust,m_waist,m_hip,m_shoulder,m_bodice,m_sleeve,m_skirt,m_pant,m_neck,m_flare,m_collar,m_ctype,m_cuffs,m_pockets,m_hood,m_fit], outputs=[man_pattern_2d, man_3d, man_summary, man_json])

    # --- Tab 4: conversational editing of the current pattern ---
    with gr.Tab("💬 Chat & Edit"):
        gr.Markdown("### Edit pattern with natural language\nGenerate a pattern first, then refine here.")
        with gr.Row():
            with gr.Column(scale=1):
                chatbot = gr.Chatbot(label="Pattern Editor", height=400)
                chat_input = gr.Textbox(label="Edit instruction", placeholder="e.g., Make sleeves longer, Add a hood", lines=2)
                with gr.Row(): chat_send = gr.Button("Apply Edit", variant="primary"); chat_clear = gr.Button("Clear")
            with gr.Column(scale=2):
                with gr.Row():
                    with gr.Column(): chat_pattern_2d = gr.Image(label="Updated 2D", height=400)
                    with gr.Column(): chat_3d = gr.Plot(label="Updated 3D")
                chat_summary = gr.Markdown()
                with gr.Accordion("JSON", open=False): chat_json = gr.Code(language="json")
        def clear_chat(): return [], None, None, "", "{}"
        chat_send.click(chat_edit, inputs=[chat_input, chatbot], outputs=[chatbot, chat_pattern_2d, chat_3d, chat_summary, chat_json])
        chat_input.submit(chat_edit, inputs=[chat_input, chatbot], outputs=[chatbot, chat_pattern_2d, chat_3d, chat_summary, chat_json])
        chat_clear.click(clear_chat, outputs=[chatbot, chat_pattern_2d, chat_3d, chat_summary, chat_json])

    # --- Tab 5: agentic render-compare-refine loop (streaming handler) ---
    with gr.Tab("🔄 Agentic Refinement"):
        gr.Markdown("""### ⚠️ Work In Progress — Iterative Refinement Loop
Upload a garment image. The AI agent will iteratively refine pattern parameters.
See [README](https://huggingface.co/spaces/vikashmakeit/garment-to-pattern) for full design docs.

**Status:** Core components (projection, similarity, convergence loop) work. VLM feedback integration needs further testing.""")
        with gr.Row():
            with gr.Column(scale=1):
                refine_image = gr.Image(type="pil", label="Upload Garment Image", height=300)
                refine_type = gr.Dropdown(choices=["Auto-detect","Shirt","Dress","Skirt","Pants","Jacket","Hoodie","Vest"], value="Auto-detect", label="Garment Type")
                refine_iters = gr.Slider(1, 15, value=5, step=1, label="Max Iterations")
                refine_btn = gr.Button("🚀 Start Refinement", variant="primary", size="lg")
            with gr.Column(scale=2):
                with gr.Row():
                    with gr.Column(): refine_2d = gr.Image(label="Best 2D Pattern", height=350)
                    with gr.Column(): refine_proj = gr.Image(label="3D→2D Projection", height=350)
                with gr.Row():
                    with gr.Column(): refine_3d = gr.Plot(label="Best 3D Preview")
                    with gr.Column(): refine_log = gr.Markdown(label="Refinement Log")
                refine_summary = gr.Markdown()
                with gr.Accordion("Best Parameters JSON", open=False): refine_json = gr.Code(language="json")
        refine_btn.click(run_refinement, inputs=[refine_image, refine_type, refine_iters],
                         outputs=[refine_2d, refine_3d, refine_proj, refine_log, refine_json, refine_summary])

    gr.HTML("""<div class="ref-box" style="margin-top:20px;"><h4>Research References</h4><ul>
        <li><b>ChatGarment</b> (2024) [<a href="https://arxiv.org/abs/2412.17811">Paper</a>]</li>
        <li><b>NGL-Prompter</b> (2025) [<a href="https://arxiv.org/abs/2602.20700">Paper</a>]</li>
        <li><b>RRVF</b> (2025) — Render-compare visual feedback [<a href="https://arxiv.org/abs/2507.20766">Paper</a>]</li>
        <li><b>SceneAssistant</b> (2026) — Agentic VLM refinement [<a href="https://arxiv.org/abs/2603.12238">Paper</a>]</li></ul></div>""")

if __name__ == "__main__":
    # css/theme are passed to gr.Blocks() above; launch() does not accept them.
    demo.launch(server_name="0.0.0.0", server_port=7860)