diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f148b4d11261d2145993363f9dfc4867d91c1613
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,30 @@
+# Credentials and Secrets
+.env
+
+# Python Compiled Files
+__pycache__/
+*.py[cod]
+
+# Virtual Environments
+venv/
+.venv/
+
+# System Files
+.DS_Store
+Thumbs.db
+
+# Logs
+*.log
+
+# Editor Directories
+.vscode/
+.idea/
+
+*.jpg
+
+stitch_merchflow_ai_dashboard.zip
+screen.jpg
+test_image.jpg
+*.zip
+*.jpeg
+*.png
\ No newline at end of file
diff --git a/.vite/deps/_metadata.json b/.vite/deps/_metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..53b40cc8acdeb1529f877875a6c89b7630f5e272
--- /dev/null
+++ b/.vite/deps/_metadata.json
@@ -0,0 +1,8 @@
+{
+ "hash": "7678463b",
+ "configHash": "aaeefbd1",
+ "lockfileHash": "e3b0c442",
+ "browserHash": "7b5c57bd",
+ "optimized": {},
+ "chunks": {}
+}
\ No newline at end of file
diff --git a/.vite/deps/package.json b/.vite/deps/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..3dbc1ca591c0557e35b6004aeba250e6a70b56e3
--- /dev/null
+++ b/.vite/deps/package.json
@@ -0,0 +1,3 @@
+{
+ "type": "module"
+}
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..9c667ec66f8a68287dfd6489180bb02d06040bfc
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,10 @@
+FROM python:3.11-slim
+WORKDIR /code
+COPY ./requirements.txt /code/requirements.txt
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+COPY . /code
+# Fix permissions for libraries that write to home
+RUN mkdir -p /tmp/home
+ENV HOME=/tmp/home
+# Start the FastAPI server on port 7860 (required by Hugging Face)
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b3617fca72fc57dca8df072e450f5415eae79360
--- /dev/null
+++ b/README.md
@@ -0,0 +1,151 @@
+---
+title: StyleSync AI
+emoji: š
+colorFrom: blue
+colorTo: purple
+sdk: docker
+pinned: false
+---
+
+
+
+# š StyleSync AI
+
+### Autonomous E-Commerce Catalog Intelligence
+
+
+
+
+
+
+
+---
+
+*A multi-agent AI pipeline that converts raw product imagery into enterprise-grade, SEO-optimized e-commerce catalogs in seconds.*
+
+
+
+---
+
+## šÆ Core Value Proposition
+
+StyleSync AI eliminates the manual bottleneck of product catalog creation. By orchestrating **Computer Vision**, **Retrieval-Augmented Generation (RAG)**, and **Large Language Models** in a seamless autonomous pipeline, it delivers production-ready product listings — from a single image upload — with zero human intervention.
+
+---
+
+## šļø System Architecture
+
+The system employs a high-performance, event-driven architecture orchestrated by **FastAPI**:
+
+### šļø Visual Analyst Agent
+- **Function**: Zero-shot product image analysis
+- **Process**: Extracts granular visual attributes ā dominant colors, material composition, design style, branding elements, and product classification
+- **Engine**: `Gemini 2.5 Flash` via the unified **Google GenAI SDK** (`google-genai`)
+
+### š§ Semantic Memory Agent
+- **Function**: RAG-based keyword retrieval with intelligent fallback
+- **Process**: Vectorizes visual attributes to query a high-dimensional index, retrieving historically high-converting SEO keywords and market trends. When the database has no match for a niche, the **Intelligence Fallback** system autonomously generates keywords via Gemini — ensuring **0% empty results**
+- **Engine**: `Pinecone Vector DB` with `gemini-embedding-001` embeddings (768 dimensions)
+
+### āļø Writer Agent
+- **Function**: High-conversion copy synthesis
+- **Process**: Fuses visual intelligence with retrieved market data to generate persuasive, conversion-optimized titles, descriptions, and feature bullet points
+- **Engine**: `Meta Llama 3.3 70B` (via Groq Cloud)
+
+### āļø Pipeline Orchestrator
+- **Function**: Async pipeline management & delivery
+- **Process**: Handles non-blocking agent execution, error propagation, and API lifecycle management. Results are delivered instantly through the **Premium Glassmorphism UI**
+- **Engine**: `FastAPI` with async/await architecture
+
+---
+
+## š„ļø Production Interface
+
+StyleSync AI ships with a **Premium Glassmorphism UI** built for instant catalog generation:
+
+- šØ Frosted-glass aesthetic with dynamic gradient backgrounds
+- š¤ Drag-and-drop image upload with real-time processing feedback
+- š Structured JSON output display for visual data, SEO keywords, and generated listings
+- š± Fully responsive design across desktop, tablet, and mobile
+
+---
+
+## š ļø Technology Stack
+
+| Layer | Technology | Purpose |
+|-------|-----------|---------|
+| **Runtime** | Python 3.10+ | Core language |
+| **Framework** | FastAPI | Async API orchestration |
+| **AI SDK** | `google-genai` (Unified) | Vision & embedding inference |
+| **Vision Model** | Gemini 2.5 Flash | Product image analysis |
+| **Embeddings** | `gemini-embedding-001` | 768-dim vector generation |
+| **Vector DB** | Pinecone (Serverless) | Semantic keyword retrieval |
+| **LLM** | Llama 3.3 70B (Groq) | Copywriting synthesis |
+| **UI** | Glassmorphism / Tailwind CSS | Production dashboard |
+| **Deployment** | Docker / Hugging Face Spaces | Containerized hosting |
+
+---
+
+## š System Updates & Technical Milestones
+
+| Date | Milestone |
+|------|-----------|
+| **Feb 2026** | ✅ Full migration from deprecated `google-generativeai` to unified `google-genai` SDK |
+| **Feb 2026** | ✅ Vision model upgraded to `Gemini 2.5 Flash` for industry-leading latency |
+| **Feb 2026** | ✅ Pinecone vector alignment to `768 dimensions` with `gemini-embedding-001` for mathematical precision |
+| **Feb 2026** | ✅ **Intelligence Fallback** system deployed — guarantees 0% empty SEO keyword results |
+| **Feb 2026** | ✅ n8n webhook decoupled — pipeline relies strictly on the Glassmorphism UI for delivery |
+| **Feb 2026** | ✅ Production Glassmorphism dashboard launched |
+
+---
+
+## š Quick Start
+
+### 1. Environment Configuration
+
+Create a `.env` file in the project root with the following keys:
+
+```env
+GEMINI_API_KEY=your_google_genai_api_key
+GROQ_API_KEY=your_groq_cloud_api_key
+PINECONE_API_KEY=your_pinecone_api_key
+```
+
+### 2. Installation
+
+```bash
+pip install -r requirements.txt
+```
+
+### 3. Launch
+
+```bash
+python main.py
+```
+
+The production dashboard will be available at `http://localhost:7860`.
+
+---
+
+## š Project Structure
+
+```
+StyleSync-AI/
+āāā main.py # FastAPI orchestrator & pipeline logic
+āāā dashboard.html # Glassmorphism production UI
+āāā Dockerfile # Container deployment config
+āāā requirements.txt # Python dependencies
+āāā agents/
+ā āāā visual_analyst.py # Gemini 2.5 Flash vision agent
+ā āāā memory_agent.py # Pinecone RAG + embedding agent
+ā āāā writer_agent.py # Llama 3.3 copywriting agent
+āāā .env # Environment variables (not tracked)
+```
+
+---
+
+
+
+**StyleSync AI** — Autonomous catalog intelligence for modern e-commerce.
+
+
diff --git a/agents/__init__.py b/agents/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/agents/manager.py b/agents/manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..a246213c4fc762489896742ea0a63a09225c006c
--- /dev/null
+++ b/agents/manager.py
@@ -0,0 +1,46 @@
+import csv
+import time
+import os
+import datetime
+from agents.trend_spotter import TrendSpotter
+from agents.visionary import Visionary
+
+class MerchManager:
+ def __init__(self):
+ self.trend_spotter = TrendSpotter()
+ self.visionary = Visionary()
+ self.results_dir = "results"
+ if not os.path.exists(self.results_dir):
+ os.makedirs(self.results_dir)
+
+ def generate_batch(self, niche: str) -> str:
+ # Step 1: Get slogans
+ print(f"š Analyzing trends for niche: {niche}...")
+ slogans = self.trend_spotter.get_trends(niche)
+
+ results = []
+
+ # Step 2: Generate art prompts
+ print(f"šØ Generating designs for {len(slogans)} slogans...")
+ for i, slogan in enumerate(slogans):
+ print(f"Generating design {i+1}/{len(slogans)}...")
+ prompt = self.visionary.generate_art_prompt(slogan, niche)
+ results.append({
+ "Niche": niche,
+ "Slogan": slogan,
+ "Art Prompt": prompt
+ })
+ time.sleep(10)
+
+ # Step 3 & 4: Save to CSV
+ timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+ filename = f"merch_batch_{niche}_{timestamp}.csv"
+ filepath = os.path.join(self.results_dir, filename)
+
+ with open(filepath, mode='w', newline='', encoding='utf-8') as file:
+ writer = csv.DictWriter(file, fieldnames=["Niche", "Slogan", "Art Prompt"])
+ writer.writeheader()
+ writer.writerows(results)
+
+        print(f"✅ Batch complete! Saved to {filepath}")
+ return filename
diff --git a/agents/memory_agent.py b/agents/memory_agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff9272bc8b503e5d745511c528963ac4186c987b
--- /dev/null
+++ b/agents/memory_agent.py
@@ -0,0 +1,104 @@
+import os
+import time
+from dotenv import load_dotenv
+from pinecone import Pinecone, ServerlessSpec
+from google import genai
+
+load_dotenv()
+
+class MemoryAgent:
+ def __init__(self):
+ # Configure Gemini
+ self.gemini_api_key = os.getenv("GEMINI_API_KEY")
+ if not self.gemini_api_key:
+ raise ValueError("GEMINI_API_KEY not found")
+ self.client = genai.Client(api_key=self.gemini_api_key)
+
+ # Configure Pinecone
+ self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
+ if not self.pinecone_api_key:
+ raise ValueError("PINECONE_API_KEY not found")
+
+ self.pc = Pinecone(api_key=self.pinecone_api_key)
+ self.index_name = "stylesync-index"
+
+ # Check and create index
+ existing_indexes = [i.name for i in self.pc.list_indexes()]
+ if self.index_name not in existing_indexes:
+ print(f"Creating index {self.index_name}...")
+ self.pc.create_index(
+ name=self.index_name,
+ dimension=768, # gemini-embedding-001 output dimension
+ metric='cosine',
+ spec=ServerlessSpec(
+ cloud='aws',
+ region='us-east-1'
+ )
+ )
+ # Wait for index to be ready
+ while not self.pc.describe_index(self.index_name).status['ready']:
+ time.sleep(1)
+ print("Index created.")
+
+ self.index = self.pc.Index(self.index_name)
+
+ def _get_embedding(self, text):
+ # Using gemini-embedding-001
+ result = self.client.models.embed_content(
+ model="gemini-embedding-001",
+ contents=text,
+ config=genai.types.EmbedContentConfig(output_dimensionality=768)
+ )
+ return result.embeddings[0].values
+
+ def seed_database(self):
+ # Check if empty
+ stats = self.index.describe_index_stats()
+ if stats.total_vector_count > 0:
+ print("Database already seeded.")
+ return
+
+ print("Seeding database...")
+ items = [
+ {
+ "id": "item1",
+ "text": "Running Shoe",
+ "keywords": "breathable, shock absorption, marathon training, lightweight"
+ },
+ {
+ "id": "item2",
+ "text": "Graphic T-Shirt",
+ "keywords": "100% cotton, vintage wash, pre-shrunk, soft feel"
+ },
+ {
+ "id": "item3",
+ "text": "Leather Wallet",
+ "keywords": "genuine leather, RFID blocking, minimalist, bifold"
+ }
+ ]
+
+ vectors = []
+ for item in items:
+ embedding = self._get_embedding(item['text'])
+ vectors.append({
+ "id": item['id'],
+ "values": embedding,
+ "metadata": {"keywords": item['keywords'], "text": item['text']}
+ })
+
+ self.index.upsert(vectors=vectors)
+ print(f"Seeded {len(vectors)} items.")
+
+ def retrieve_keywords(self, query_text: str):
+ try:
+ query_embedding = self._get_embedding(query_text)
+
+ results = self.index.query(
+ vector=query_embedding,
+ top_k=5,
+ include_metadata=True
+ )
+ return [m.metadata['keywords'] for m in results.matches if m.metadata and 'keywords' in m.metadata]
+ except Exception as e:
+            print(f"❌ Keyword Retrieval Failed: {e}")
+ return []
diff --git a/agents/visual_analyst.py b/agents/visual_analyst.py
new file mode 100644
index 0000000000000000000000000000000000000000..597c4800c42aecd29d4f08568a2ec3e7d20aa4e2
--- /dev/null
+++ b/agents/visual_analyst.py
@@ -0,0 +1,61 @@
+import os
+import json
+import re
+from google import genai
+from dotenv import load_dotenv
+
+load_dotenv()
+
+class VisualAnalyst:
+ def __init__(self):
+ self.api_key = os.getenv("GEMINI_API_KEY")
+ if not self.api_key:
+ raise ValueError("GEMINI_API_KEY not found")
+
+ self.client = genai.Client(api_key=self.api_key)
+ self.model_name = "gemini-2.5-flash"
+        print(f"✅ VisualAnalyst stored Gemini model: {self.model_name}")
+
+ async def analyze_image(self, image_path: str):
+ try:
+ # Upload the file to Gemini
+ # Note: For efficiency in production, files should be managed (uploads/deletes)
+ # but for this agentic flow, we'll upload per request or assume local path usage helper if needed.
+ # However, the standard `model.generate_content` can take PIL images or file objects directly for some sdk versions,
+ # but using the File API is cleaner for 1.5 Flash multi-modal.
+ # Let's use the simpler PIL integration if available, or just path if the SDK supports it.
+ # actually, standard genai usage for images usually involves PIL or uploading.
+ # Let's try the PIL approach first as it's often more direct for local scripts.
+ import PIL.Image
+ img = PIL.Image.open(image_path)
+
+ user_prompt = (
+ "Analyze this product image. "
+ "Return ONLY valid JSON with keys: main_color, product_type, design_style, visual_features."
+ )
+
+ # We'll stick to prompt engineering for now to match the "Return ONLY valid JSON" instruction.
+ response = self.client.models.generate_content(
+ model=self.model_name,
+ contents=[user_prompt, img]
+ )
+
+ response_text = response.text
+
+ # Use regex to find the JSON block robustly
+ match = re.search(r'\{.*\}', response_text, re.DOTALL)
+ if match:
+ cleaned_content = match.group(0)
+ else:
+ cleaned_content = response_text
+
+ return json.loads(cleaned_content.strip())
+
+ except Exception as e:
+            print(f"❌ Analysis Failed: {e}")
+ return {
+ "main_color": "Unknown",
+ "product_type": "Unknown",
+ "design_style": "Unknown",
+ "visual_features": [f"Error: {str(e)}"]
+ }
diff --git a/agents/writer_agent.py b/agents/writer_agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..826bf305076b9d9167062be77a0d20f1c508b8c2
--- /dev/null
+++ b/agents/writer_agent.py
@@ -0,0 +1,48 @@
+import os
+import json
+from groq import Groq
+from dotenv import load_dotenv
+
+load_dotenv()
+
+class WriterAgent:
+ def __init__(self):
+ self.api_key = os.getenv("GROQ_API_KEY")
+ if not self.api_key:
+ raise ValueError("GROQ_API_KEY not found in environment variables")
+ self.client = Groq(api_key=self.api_key)
+ self.model = "llama-3.3-70b-versatile"
+
+ def write_listing(self, visual_data: dict, seo_keywords: list) -> dict:
+ system_prompt = (
+ "You are an expert e-commerce copywriter. "
+ "Write a persuasive product listing based on these visual attributes and SEO keywords. "
+ "Return JSON with keys: title, description, bullet_points."
+ )
+
+ user_content = f"""
+ Visual Attributes: {json.dumps(visual_data, indent=2)}
+
+ SEO Keywords: {', '.join(seo_keywords)}
+
+ Please generate the listing in JSON format.
+ """
+
+ try:
+ completion = self.client.chat.completions.create(
+ model=self.model,
+ messages=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_content}
+ ],
+ temperature=0.7,
+ response_format={"type": "json_object"},
+ timeout=15.0
+ )
+
+ response_text = completion.choices[0].message.content
+ return json.loads(response_text)
+
+ except Exception as e:
+ print(f"Error generating listing: {e}")
+ return {"error": str(e)}
diff --git a/apply_qa_fixes.py b/apply_qa_fixes.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b21cfd70eb26dc6a4a596510f242b56a1ea3f44
--- /dev/null
+++ b/apply_qa_fixes.py
@@ -0,0 +1,111 @@
+import os
+import re
+
+def patch_main():
+ with open("main.py", "r", encoding="utf-8") as f:
+ content = f.read()
+
+ old_pattern = r'return JSONResponse\(\s*content=\{\s*"error": str\(e\),\s*"type": type\(e\)\.__name__,\s*"details": error_details\s*\},\s*status_code=500\s*\)'
+ new_text = '''return JSONResponse(
+ content={
+ "error": "An internal server error occurred.",
+ "type": type(e).__name__
+ },
+ status_code=500
+ )'''
+ content = re.sub(old_pattern, new_text, content[1:] if content.startswith('\ufeff') else content)
+
+ with open("main.py", "w", encoding="utf-8") as f:
+ f.write(content)
+ print("Patched main.py")
+
+
+def patch_memory_agent():
+ path = "agents/memory_agent.py"
+ with open(path, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ old_func = r'def retrieve_keywords\(.*?: str\):.*?return \[m\.metadata\[\'keywords\'\] for m in results\.matches if m\.metadata and \'keywords\' in m\.metadata\]'
+ new_func = '''def retrieve_keywords(self, query_text: str):
+ try:
+ query_embedding = self._get_embedding(query_text)
+
+ results = self.index.query(
+ vector=query_embedding,
+ top_k=5,
+ include_metadata=True
+ )
+ return [m.metadata['keywords'] for m in results.matches if m.metadata and 'keywords' in m.metadata]
+ except Exception as e:
+            print(f"❌ Keyword Retrieval Failed: {e}")
+ return []'''
+
+ content = re.sub(old_func, new_func, content, flags=re.DOTALL)
+
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(content)
+ print("Patched agents/memory_agent.py")
+
+
+def patch_visual_analyst():
+ path = "agents/visual_analyst.py"
+ with open(path, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ # Add timeout
+ content = content.replace(
+ "response = self.model.generate_content([user_prompt, img])",
+ "response = self.model.generate_content([user_prompt, img], request_options={'timeout': 15.0})"
+ )
+
+ if "import re" not in content:
+ content = content.replace("import json", "import json\nimport re")
+
+ # Rewrite JSON parsing
+ old_parsing = r'# Clean up potential markdown code fences.*?return json\.loads\(cleaned_content\.strip\(\)\)'
+ new_parsing = '''# Use regex to find the JSON block robustly
+ match = re.search(r'\\{.*\\}', response_text, re.DOTALL)
+ if match:
+ cleaned_content = match.group(0)
+ else:
+ cleaned_content = response_text
+
+ return json.loads(cleaned_content.strip())'''
+
+ content = re.sub(old_parsing, new_parsing, content, flags=re.DOTALL)
+
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(content)
+ print("Patched agents/visual_analyst.py")
+
+
+def patch_writer_agent():
+ path = "agents/writer_agent.py"
+ with open(path, "r", encoding="utf-8") as f:
+ content = f.read()
+
+ old_completion = r'completion = self\.client\.chat\.completions\.create\(\s*model=self\.model,\s*messages=\[\s*\{"role": "system", "content": system_prompt\},\s*\{"role": "user", "content": user_content\}\s*\],\s*temperature=0\.7,\s*response_format=\{"type": "json_object"\}\s*\)'
+
+ new_completion = '''completion = self.client.chat.completions.create(
+ model=self.model,
+ messages=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_content}
+ ],
+ temperature=0.7,
+ response_format={"type": "json_object"},
+ timeout=15.0
+ )'''
+
+ content = re.sub(old_completion, new_completion, content)
+
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(content)
+ print("Patched agents/writer_agent.py")
+
+if __name__ == "__main__":
+ patch_main()
+ patch_memory_agent()
+ patch_visual_analyst()
+ patch_writer_agent()
+ print("All file patching completed successfully.")
diff --git a/args.json b/args.json
new file mode 100644
index 0000000000000000000000000000000000000000..932aab106c59eb00118aaa29363078216419750f
--- /dev/null
+++ b/args.json
@@ -0,0 +1,4 @@
+{
+ "projectId": "13453765122851154258",
+ "screenId": "cf3af7f8a9e74daf85096241ed88c75c"
+}
diff --git a/check_basic.py b/check_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..62ed83a48e0a428b6347cdd23e262080cb85ed5d
--- /dev/null
+++ b/check_basic.py
@@ -0,0 +1,20 @@
+import os
+from huggingface_hub import InferenceClient
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("HF_TOKEN")
+client = InferenceClient(api_key=api_key)
+
+print(f"Testing token with microsoft/resnet-50")
+
+try:
+ # Pass the URL directly as the input (InferenceClient handles URLs for image tasks)
+ result = client.image_classification(
+ model="microsoft/resnet-50",
+ image="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
+ )
+ print("Success:", result)
+except Exception as e:
+ print("Failed:", e)
diff --git a/check_gemini.py b/check_gemini.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfca9358a8a65cf29acb003b394c594983f845f0
--- /dev/null
+++ b/check_gemini.py
@@ -0,0 +1,26 @@
+import os
+import google.generativeai as genai
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
+genai.configure(api_key=api_key)
+
+print("Listing available Gemini models...")
+try:
+ for m in genai.list_models():
+ if 'generateContent' in m.supported_generation_methods:
+ print(m.name)
+except Exception as e:
+ print(f"List models failed: {e}")
+
+model_name = "gemini-1.5-flash"
+print(f"\nTesting model: {model_name}")
+
+try:
+ model = genai.GenerativeModel(model_name)
+ response = model.generate_content("Hello, can you see this?")
+ print("Response:", response.text)
+except Exception as e:
+ print(f"Test failed: {e}")
diff --git a/check_gemini_clean.py b/check_gemini_clean.py
new file mode 100644
index 0000000000000000000000000000000000000000..751fe58712a396513b8f40bd9b9773b8407f90ad
--- /dev/null
+++ b/check_gemini_clean.py
@@ -0,0 +1,24 @@
+import os
+import google.generativeai as genai
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
+genai.configure(api_key=api_key)
+
+candidates = [
+ "gemini-2.0-flash",
+ "gemini-2.0-flash-exp",
+ "models/gemini-2.0-flash"
+]
+
+for model_name in candidates:
+ print(f"\nTesting model: {model_name}")
+ try:
+ model = genai.GenerativeModel(model_name)
+ response = model.generate_content("Hello")
+        print(f"✅ Success with {model_name}: {response.text}")
+ break
+ except Exception as e:
+        print(f"❌ Failed with {model_name}: {e}")
diff --git a/check_groq.py b/check_groq.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f1cfc8daf0a05db639ec0c8a57894f8937e5d93
--- /dev/null
+++ b/check_groq.py
@@ -0,0 +1,11 @@
+import os
+from groq import Groq
+from dotenv import load_dotenv
+
+load_dotenv()
+client = Groq(api_key=os.getenv("GROQ_API_KEY"))
+
+print("Listing Groq models...")
+models = client.models.list()
+for m in models.data:
+ print(m.id)
diff --git a/check_groq_models.py b/check_groq_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..95ef4f8e708e6ef50024201903a48ad7c058663c
--- /dev/null
+++ b/check_groq_models.py
@@ -0,0 +1,14 @@
+import os
+from groq import Groq
+from dotenv import load_dotenv
+
+load_dotenv()
+
+try:
+ client = Groq(api_key=os.getenv("GROQ_API_KEY"))
+ models = client.models.list()
+ print("Available Models:")
+ for model in models.data:
+ print(f"- {model.id}")
+except Exception as e:
+ print(f"Error listing models: {e}")
diff --git a/check_groq_vision.py b/check_groq_vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fef204aa21a954072c0646b9fd04ea68d80a0d2
--- /dev/null
+++ b/check_groq_vision.py
@@ -0,0 +1,36 @@
+import os
+from groq import Groq
+from dotenv import load_dotenv
+import base64
+
+load_dotenv()
+
+client = Groq(api_key=os.getenv("GROQ_API_KEY"))
+model = "llama-3.2-11b-vision-preview"
+
+print(f"Testing Groq Vision model: {model}")
+
+# Test 1: Image URL
+print("\n--- Test 1: Image URL ---")
+try:
+ image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
+ completion = client.chat.completions.create(
+ model=model,
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": "What's in this image?"},
+ {"type": "image_url", "image_url": {"url": image_url}},
+ ],
+ }
+ ],
+ temperature=1,
+ max_tokens=1024,
+ top_p=1,
+ stream=False,
+ stop=None,
+ )
+ print("Response:", completion.choices[0].message.content)
+except Exception as e:
+ print("Groq Vision failed:", e)
diff --git a/check_idefics.py b/check_idefics.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a147a19999a3e66c6c125232153a5e26f9112f8
--- /dev/null
+++ b/check_idefics.py
@@ -0,0 +1,33 @@
+import os
+from huggingface_hub import InferenceClient
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("HF_TOKEN")
+client = InferenceClient(api_key=api_key)
+model = "HuggingFaceM4/idefics2-8b"
+
+print(f"Testing model: {model}")
+
+# Test 1: Image URL
+print("\n--- Test 1: Image URL ---")
+try:
+ image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
+ messages = [
+ {
+ "role": "user",
+ "content": [
+ {"type": "image_url", "image_url": {"url": image_url}},
+ {"type": "text", "text": "What is in this image?"}
+ ]
+ }
+ ]
+ completion = client.chat.completions.create(
+ model=model,
+ messages=messages,
+ max_tokens=100
+ )
+ print("Response:", completion.choices[0].message.content)
+except Exception as e:
+ print("Image URL failed:", e)
diff --git a/check_idefics_raw.py b/check_idefics_raw.py
new file mode 100644
index 0000000000000000000000000000000000000000..63e5737d1812d52a2843276bbd54f98858b8b93c
--- /dev/null
+++ b/check_idefics_raw.py
@@ -0,0 +1,29 @@
+import os
+import requests
+import json
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("HF_TOKEN")
+model = "HuggingFaceM4/idefics2-8b"
+url = f"https://router.huggingface.co/models/{model}"
+
+headers = {"Authorization": f"Bearer {api_key}"}
+
+print(f"Testing URL: {url}")
+
+# Test A: Simple text inputs
+print("\n--- Test A: Simple Text ---")
+response = requests.post(url, headers=headers, json={"inputs": "Hello"})
+print(f"Status: {response.status_code}")
+print("Response:", response.text)
+
+# Test B: Formatted inputs (Standard for some VLM APIs)
+# Often they accept { "inputs": "User: ...", "parameters": ... }
+print("\n--- Test B: Formatted Prompt ---")
+image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
+prompt = f"User:  Describe this image.\nAssistant:"
+response = requests.post(url, headers=headers, json={"inputs": prompt, "parameters": {"max_new_tokens": 50}})
+print(f"Status: {response.status_code}")
+print("Response:", response.text)
diff --git a/check_idefics_v2.py b/check_idefics_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..cff412d492377c765df36dfdf508f92fbc4211a4
--- /dev/null
+++ b/check_idefics_v2.py
@@ -0,0 +1,31 @@
+import os
+from huggingface_hub import InferenceClient
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("HF_TOKEN")
+client = InferenceClient(api_key=api_key)
+model = "HuggingFaceM4/idefics2-8b"
+
+print(f"Testing model: {model}")
+
+image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
+
+# Format for Idefics2:
+# User: ![]() \nAssistant:
+prompt = f"User:  Describe this image.\nAssistant:"
+
+print(f"\n--- Testing with text_generation and specific prompt ---")
+print(f"Prompt: {prompt}")
+
+try:
+ # Use text_generation for models that don't support chat
+ response = client.text_generation(
+ prompt=prompt,
+ model=model,
+ max_new_tokens=100
+ )
+ print("Response:", response)
+except Exception as e:
+ print("Failed:", e)
diff --git a/check_idefics_v3.py b/check_idefics_v3.py
new file mode 100644
index 0000000000000000000000000000000000000000..c141fb6a5ab3ec0ccdc7486cf9f14be26624dc16
--- /dev/null
+++ b/check_idefics_v3.py
@@ -0,0 +1,30 @@
+import os
+import traceback
+from huggingface_hub import InferenceClient
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("HF_TOKEN")
+client = InferenceClient(api_key=api_key)
+model = "HuggingFaceM4/idefics2-8b"
+
+print(f"Testing model: {model}")
+
+print("\n--- Test 1: Image to Text (Captioning) ---")
+try:
+ # This might work if the API treats it as captioning
+ res = client.image_to_text(
+ "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true",
+ model=model
+ )
+ print("Response:", res)
+except Exception:
+ traceback.print_exc()
+
+print("\n--- Test 2: Text Generation (Simple) ---")
+try:
+ res = client.text_generation("describe a car", model=model, max_new_tokens=50)
+ print("Response:", res)
+except Exception:
+ traceback.print_exc()
diff --git a/check_llama.py b/check_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce89a93096544562a342f56a6ac8eb2f0074892a
--- /dev/null
+++ b/check_llama.py
@@ -0,0 +1,33 @@
+import os
+from huggingface_hub import InferenceClient
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("HF_TOKEN")
+client = InferenceClient(api_key=api_key)
+model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+
+print(f"Testing model: {model}")
+
+# Test 1: Image URL (Llama Vision)
+print("\n--- Test 1: Image URL ---")
+try:
+ image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
+ messages = [
+ {
+ "role": "user",
+ "content": [
+ {"type": "image_url", "image_url": {"url": image_url}},
+ {"type": "text", "text": "What is in this image?"}
+ ]
+ }
+ ]
+ completion = client.chat.completions.create(
+ model=model,
+ messages=messages,
+ max_tokens=100
+ )
+ print("Response:", completion.choices[0].message.content)
+except Exception as e:
+ print("Image URL failed:", e)
diff --git a/check_llava.py b/check_llava.py
new file mode 100644
index 0000000000000000000000000000000000000000..45f8838ebf1283406699c569e89065ea0ce4f8de
--- /dev/null
+++ b/check_llava.py
@@ -0,0 +1,33 @@
+import os
+from huggingface_hub import InferenceClient
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("HF_TOKEN")
+client = InferenceClient(api_key=api_key)
+model = "llava-hf/llava-1.5-7b-hf"
+
+print(f"Testing model: {model}")
+
+# Test 1: Image URL
+print("\n--- Test 1: Image URL ---")
+try:
+ image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
+ messages = [
+ {
+ "role": "user",
+ "content": [
+ {"type": "image_url", "image_url": {"url": image_url}},
+ {"type": "text", "text": "What is in this image?"}
+ ]
+ }
+ ]
+ completion = client.chat.completions.create(
+ model=model,
+ messages=messages,
+ max_tokens=100
+ )
+ print("Response:", completion.choices[0].message.content)
+except Exception as e:
+ print("Image URL failed:", e)
diff --git a/check_models.py b/check_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..959f25472fa8d8095ea27d730091b39b1be92b95
--- /dev/null
+++ b/check_models.py
@@ -0,0 +1,15 @@
+import google.generativeai as genai
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+api_key = os.getenv("GEMINI_API_KEY")
+if not api_key:
+ print("No API key found")
+else:
+ genai.configure(api_key=api_key)
+ print("Listing models...")
+ for m in genai.list_models():
+ if 'generateContent' in m.supported_generation_methods:
+ print(m.name)
diff --git a/check_models_list.py b/check_models_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f7dd960dc1329afe118c05f897888e57353f001
--- /dev/null
+++ b/check_models_list.py
@@ -0,0 +1,15 @@
import google.generativeai as genai
import os
from dotenv import load_dotenv

# Load GEMINI_API_KEY from the local .env file.
load_dotenv()

api_key = os.getenv("GEMINI_API_KEY")
if not api_key:
    # Fix: repaired a mojibake-corrupted marker character in this message.
    print("API Key not found")
else:
    genai.configure(api_key=api_key)
    print("Listing available models...")
    # Only list models that can actually be used for text generation.
    for m in genai.list_models():
        if 'generateContent' in m.supported_generation_methods:
            print(m.name)
diff --git a/check_qwen.py b/check_qwen.py
new file mode 100644
index 0000000000000000000000000000000000000000..f91c5ec6dd133195b91d402ff082a927939b516f
--- /dev/null
+++ b/check_qwen.py
@@ -0,0 +1,48 @@
import os
from huggingface_hub import InferenceClient
from dotenv import load_dotenv

# Read HF_TOKEN from the local .env file.
load_dotenv()

api_key = os.getenv("HF_TOKEN")
client = InferenceClient(api_key=api_key)
model = "Qwen/Qwen2-VL-7B-Instruct"

print(f"Testing model: {model}")

# Test 1: plain text chat round-trip.
print("\n--- Test 1: Text Only ---")
try:
    completion = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Hello, are you working?"}],
        max_tokens=100,
    )
    print("Response:", completion.choices[0].message.content)
except Exception as e:
    print("Text only failed:", e)

# Test 2: Image (using a public URL to avoid base64 issues first)
print("\n--- Test 2: Image URL ---")
try:
    image_url = (
        "https://huggingface.co/datasets/huggingface/documentation-images"
        "/resolve/main/transformers/tasks/car.jpg?download=true"
    )
    user_content = [
        {"type": "image_url", "image_url": {"url": image_url}},
        {"type": "text", "text": "What is in this image?"},
    ]
    completion = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_content}],
        max_tokens=100,
    )
    print("Response:", completion.choices[0].message.content)
except Exception as e:
    print("Image URL failed:", e)
diff --git a/check_qwen_raw.py b/check_qwen_raw.py
new file mode 100644
index 0000000000000000000000000000000000000000..45c10fd2848b66ec8c269e8a3fef31daabf88dbc
--- /dev/null
+++ b/check_qwen_raw.py
@@ -0,0 +1,52 @@
import os
import requests
import json
from dotenv import load_dotenv

# Read HF_TOKEN from the local .env file.
load_dotenv()

api_key = os.getenv("HF_TOKEN")
model = "Qwen/Qwen2-VL-7B-Instruct"
# Update URL to router
url = f"https://router.huggingface.co/models/{model}"

headers = {"Authorization": f"Bearer {api_key}"}

print(f"Testing URL: {url}")

# Test 1: classic text-generation payload ("inputs" is a bare string).
data_text = {
    "inputs": "Hello",
    "parameters": {"max_new_tokens": 50},
}
print("\n--- Test 1: Text Generation (inputs string) ---")
response = requests.post(url, headers=headers, json=data_text)
print(f"Status: {response.status_code}")
print("Response:", response.text)

# Test 2: visual-question-answering payload (image + question).
data_vqa = {
    "inputs": {
        "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true",
        "question": "What is in this image?",
    }
}
print("\n--- Test 2: VQA Format ---")
response = requests.post(url, headers=headers, json=data_vqa)
print(f"Status: {response.status_code}")
print("Response:", response.text)

# Test 3: OpenAI-style chat-completions endpoint.
url_chat = f"https://router.huggingface.co/models/{model}/v1/chat/completions"
print(f"\nTesting URL: {url_chat}")
data_chat = {
    "model": model,  # Sometimes required in body
    "messages": [{"role": "user", "content": "Hello"}],
    "max_tokens": 50,
}
print("\n--- Test 3: Chat Completion ---")
response = requests.post(url_chat, headers=headers, json=data_chat)
print(f"Status: {response.status_code}")
print("Response:", response.text)
diff --git a/clean_binary_deploy.py b/clean_binary_deploy.py
new file mode 100644
index 0000000000000000000000000000000000000000..16df127fc6595fc521d36c9cd037911462ac9220
--- /dev/null
+++ b/clean_binary_deploy.py
@@ -0,0 +1,80 @@
+import os
+import subprocess
+
def main():
    """Purge committed binary artifacts, extend .gitignore, and force-push
    a fresh orphan branch to the Hugging Face Space remote."""
    files_to_delete = [
        "stitch_stylesync_ai_dashboard.zip",
        "screen.jpg",
        "test_image.jpg"
    ]

    # Delete Local Binaries
    print("Deleting local binaries...")
    for filename in files_to_delete:
        try:
            os.remove(filename)
            # Fix: these messages previously printed a literal "(unknown)"
            # placeholder instead of interpolating the filename.
            print(f"Deleted: {filename}")
        except FileNotFoundError:
            print(f"File not found (already deleted): {filename}")
        except Exception as e:
            print(f"Error deleting {filename}: {e}")

    # Update .gitignore
    print("Updating .gitignore...")
    gitignore_path = ".gitignore"

    # Read existing content to avoid duplicate entries
    existing_ignores = []
    try:
        if os.path.exists(gitignore_path):
            with open(gitignore_path, "r", encoding="utf-8") as f:
                existing_ignores = f.read().splitlines()
    except Exception as e:
        print(f"Warning: Could not read .gitignore: {e}")

    ignores_to_add = ["*.zip", "*.jpg"]

    try:
        with open(gitignore_path, "a", encoding="utf-8") as f:
            for ignore in ignores_to_add:
                if ignore not in existing_ignores:
                    # Leading newline guards against a file that doesn't end with one.
                    f.write(f"\n{ignore}\n")
                    print(f"Added '{ignore}' to .gitignore")
                else:
                    print(f"'{ignore}' already in .gitignore")
    except Exception as e:
        print(f"Error updating .gitignore: {e}")

    # The Orphan Branch Strategy: a brand-new branch with no history, so
    # large blobs in old commits never reach the Space remote.
    print("\nExecuting Git Orphan Branch Strategy...")

    commands = [
        "git checkout --orphan hf_clean_deploy_v2",
        "git add .",
        'git commit -m "UI Update: Glassmorphism Design & Binary Cleanup"'
    ]

    for cmd in commands:
        print(f"Running: {cmd}")
        subprocess.run(cmd, shell=True, check=False)

    push_cmd = "git push --force space hf_clean_deploy_v2:main"
    print(f"\nRunning final push command: {push_cmd}")

    result = subprocess.run(push_cmd, shell=True, capture_output=True, text=True)

    print("\n--- Push Command STDOUT ---")
    print(result.stdout)
    print("--- Push Command STDERR ---")
    print(result.stderr)
    print("---------------------------\n")

    if result.returncode == 0:
        # Fix: repaired mojibake-split status strings (were broken across lines).
        print("Force push successful!")
    else:
        print("Force push failed (see STDERR above).")

if __name__ == "__main__":
    main()
diff --git a/code.html b/code.html
new file mode 100644
index 0000000000000000000000000000000000000000..9104e6fde64fe5508e21d6326bd61d06bbd01df4
--- /dev/null
+++ b/code.html
@@ -0,0 +1,203 @@
+
+
+
+
+StyleSync AI Dashboard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Generated Output
+
+
+
+
+
+
+
+
+
+
+
+
+Vision Agent
+Gemini Pro 1.5
+
+
visibility
+
+
+
+
+
+
+Reasoning Agent
+Llama 3 70B
+
+
psychology
+
+
+
+
+
+
+SEO Context
+Pinecone DB
+
+
database
+
+
+
+
+
+
1 {
+2 "product_analysis": {
+3 "title": "Apex Terrain All-Weather Performance Jacket",
+4 "category": "Outerwear / Men's / Technical Shells",
+5 "features": [
+6 "Gore-Tex Pro Membrane",
+7 "Articulated Sleeves",
+8 "Helmet-Compatible Hood"
+9 ],
+10 "seo_tags": [
+11 "#hikinggear", "#waterproof", "#adventure"
+12 ],
+13 "sentiment_score": 0.98,
+14 "market_fit": "High Demand"
+15 },
+16 "deployment_status": "Ready"
+17 }
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/connect_n8n.py b/connect_n8n.py
new file mode 100644
index 0000000000000000000000000000000000000000..229041fe72e0225969167ddb119129031166cb5a
--- /dev/null
+++ b/connect_n8n.py
@@ -0,0 +1,126 @@
+import os
+import subprocess
+
def update_requirements():
    """Make sure requirements.txt exists and lists httpx (idempotent)."""
    req_file = "requirements.txt"
    if not os.path.exists(req_file):
        with open(req_file, "w") as f:
            f.write("httpx\n")
        print(f"Created {req_file} with httpx.")
        return

    with open(req_file, "r") as f:
        already_listed = "httpx" in f.read()

    if already_listed:
        print("httpx already in requirements.txt.")
    else:
        with open(req_file, "a") as f:
            f.write("\nhttpx\n")
        print("Appended httpx to requirements.txt.")
+
def update_main():
    """Rewrite main.py so the catalog pipeline also fires the N8N webhook.

    The generated app serves dashboard.html at "/", runs the three-agent
    pipeline (vision -> memory -> writer) on uploaded images, and posts
    the final payload to N8N_WEBHOOK_URL as a fire-and-forget task.
    """
    # Fix: mojibake-corrupted status strings in the original template were
    # split across lines and would have produced syntactically invalid
    # Python in the generated main.py; they are repaired to plain text.
    main_content = r'''import os
import httpx
import asyncio
from fastapi import FastAPI, UploadFile, File
from fastapi.responses import HTMLResponse, JSONResponse
from dotenv import load_dotenv

# Import Agents
from agents.visual_analyst import VisualAnalyst
from agents.memory_agent import MemoryAgent
from agents.writer_agent import WriterAgent

load_dotenv()
app = FastAPI()

# Initialize Agents
try:
    visual_agent = VisualAnalyst()
    memory_agent = MemoryAgent()
    writer_agent = WriterAgent()
    memory_agent.seed_database()
    print("All Agents Online")
except Exception as e:
    print(f"Agent Startup Warning: {e}")


@app.get("/", response_class=HTMLResponse)
async def read_root():
    try:
        with open("dashboard.html", "r") as f:
            return f.read()
    except FileNotFoundError:
        return "Error: dashboard.html not found"


@app.post("/generate-catalog")
async def generate_catalog(file: UploadFile = File(...)):
    try:
        # 1. Save Temp File
        os.makedirs("uploads", exist_ok=True)
        file_path = f"uploads/{file.filename}"
        with open(file_path, "wb") as f:
            f.write(await file.read())

        # 2. Run AI Pipeline
        visual_data = await visual_agent.analyze_image(file_path)

        query = f"{visual_data.get('main_color', '')} {visual_data.get('product_type', 'product')}"
        seo_keywords = memory_agent.retrieve_keywords(query)

        listing = writer_agent.write_listing(visual_data, seo_keywords)

        # 3. Construct Final Payload
        final_data = {
            "visual_data": visual_data,
            "seo_keywords": seo_keywords,
            "listing": listing
        }

        # 4. N8N AUTOMATION TRIGGER
        n8n_url = os.getenv("N8N_WEBHOOK_URL")
        if n8n_url:
            print(f"Sending data to N8N: {n8n_url}")
            # Fire and forget (don't make the user wait for n8n)
            asyncio.create_task(send_to_n8n(n8n_url, final_data))

        # Cleanup
        if os.path.exists(file_path):
            os.remove(file_path)

        return JSONResponse(content=final_data)
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)


# Async Helper to send data without blocking
async def send_to_n8n(url, data):
    try:
        async with httpx.AsyncClient() as client:
            await client.post(url, json=data, timeout=5.0)
        print("N8N Webhook Sent Successfully")
    except Exception as e:
        print(f"N8N Webhook Failed: {e}")


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
'''
    with open("main.py", "w", encoding="utf-8") as f:
        f.write(main_content)
    print("Updated main.py with N8N integration logic.")
+
def deploy():
    """Commit any pending changes and push the clean_deploy branch to the Space."""
    try:
        subprocess.run(["git", "add", "."], check=True)
        # Only commit when the working tree actually has staged changes.
        status = subprocess.run(["git", "status", "--porcelain"], capture_output=True, text=True)
        if status.stdout.strip():
            subprocess.run(["git", "commit", "-m", "Add N8N Integration"], check=True)
            print("Git commit successful.")
        else:
            print("No changes to commit.")

        print("Pushing to space...")
        subprocess.run(["git", "push", "space", "clean_deploy:main"], check=True)
        # Fix: repaired a mojibake-split success string (syntax-breaking).
        print("Successfully deployed to Hugging Face Space.")

    except subprocess.CalledProcessError as e:
        print(f"Deployment failed: {e}")
+
if __name__ == "__main__":
    # Fix: the completion message was mojibake-split across two lines,
    # which was a syntax error; repaired to a single plain-text string.
    print("Starting N8N Integration Setup...")
    update_requirements()
    update_main()
    deploy()
    print("connect_n8n.py completed.")
diff --git a/create_dockerfile.py b/create_dockerfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..03ef0f80ff9e027e30a57aef41a8c93db4f1a38d
--- /dev/null
+++ b/create_dockerfile.py
@@ -0,0 +1,50 @@
+import subprocess
+import sys
+
def run_command(command):
    """Execute *command* through the system shell.

    Prints the command first; if it exits non-zero, the failure is
    reported and the whole script terminates with exit code 1.
    """
    print(f"Running: {command}")
    try:
        # shell=True runs the command string exactly as the user would type it.
        subprocess.run(command, shell=True, check=True)
    except subprocess.CalledProcessError as exc:
        print(f"Error executing command '{command}': {exc}")
        sys.exit(1)
+
def main():
    """Write a Dockerfile for the FastAPI app and force-push it to the Space."""
    # 1. Create Dockerfile
    # NOTE(review): this template pins python:3.9 while the committed
    # Dockerfile in the repo uses python:3.11-slim -- confirm which base
    # image is actually intended before relying on this script.
    dockerfile_content = """FROM python:3.9
WORKDIR /code
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
COPY . /code
# Fix permissions for libraries that write to home
RUN mkdir -p /tmp/home
ENV HOME=/tmp/home
# Start the FastAPI server on port 7860 (required by Hugging Face)
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
"""

    print("Creating Dockerfile...")
    try:
        # newline='\n' keeps LF line endings even when run on Windows.
        with open("Dockerfile", "w", newline='\n') as f:
            f.write(dockerfile_content)
        print("Dockerfile created successfully.")
    except Exception as e:
        print(f"Failed to create Dockerfile: {e}")
        sys.exit(1)

    # 2. Push to Space
    print("Executing Git commands...")
    commands = [
        'git add Dockerfile',
        'git commit -m "Add Dockerfile for Hugging Face deployment"',
        'git push -f space clean_deploy:main'
    ]

    # run_command exits the script on the first failing command.
    for cmd in commands:
        run_command(cmd)

    print("\ncreate_dockerfile.py execution completed.")

if __name__ == "__main__":
    main()
diff --git a/dashboard.html b/dashboard.html
new file mode 100644
index 0000000000000000000000000000000000000000..55ec7781ef3b2ebe6493927756e5a8400c36ae24
--- /dev/null
+++ b/dashboard.html
@@ -0,0 +1,397 @@
+
+
+
+
+StyleSync AI Dashboard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Input Data
+Step 1 of 2
+
+
+
+
+
+
+cloud_upload
+
+
+
Drop Product Image Here
+
Supports JPG, PNG, WEBP
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Generated Output
+
+
Step 2 of 2
+
+
+
+
+
+
+
+
+
+
+
+
+Vision Agent
+Gemini Pro 1.5
+
+
+
+
+
+
+
+Reasoning Agent
+Llama 3 70B
+
+
+
+
+
+
+
+SEO Context
+Pinecone DB
+
+
+
+
+
+
+
+code
+ output.json
+
+
+
+
+
01{
+02 "product_analysis": {
+03 "title": "Noir Elite Series Artisan Timepiece",
+04 "category": "Luxury / Accessories",
+05 "features": [
+06 "Obsidian Finish",
+07 "Golden Accents",
+08 "Smart Haptic Interface"
+09 ],
+10 "seo_tags": [
+11 "#luxurywear", "#amberstyle", "#premiumtech"
+12 ],
+13 "sentiment_score": 0.99,
+14 "market_fit": "Exceptional"
+15 },
+16 "deployment_status": "Authorized"
+17}
+
+
+
+
+
+
+
+
+
+
diff --git a/deploy_new_ui.py b/deploy_new_ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac072140ef156dce97d09228046f6d5ffdd5c4a3
--- /dev/null
+++ b/deploy_new_ui.py
@@ -0,0 +1,60 @@
+import re
+import subprocess
+import sys
+
def main():
    """Replace dashboard.html with the glassui.html design and deploy it.

    Verifies that the new markup still contains every element id the
    dashboard JavaScript depends on before overwriting, then commits and
    pushes the result to the Hugging Face Space remote.
    """
    glassui_path = "glassui.html"
    dashboard_path = "dashboard.html"

    # Read the New Design
    try:
        with open(glassui_path, 'r', encoding='utf-8') as f:
            glassui_content = f.read()
    except FileNotFoundError:
        print(f"Error: Could not find '{glassui_path}'.")
        sys.exit(1)

    # The Safety Scan: these ids are wired up by the dashboard's JS; a
    # missing one would silently break the UI after the swap.
    required_ids = [
        "dropZone",
        "fileInput",
        "browseBtn",
        "startBtn",
        "deployBtn",
        "jsonOutput",
        "copyBtn",
        "downloadBtn"
    ]

    for element_id in required_ids:
        # Match id="element_id" or id='element_id'.
        pattern = rf'id\s*=\s*[\'"]{element_id}[\'"]'
        if not re.search(pattern, glassui_content):
            # Fix: repaired a mojibake-corrupted warning marker.
            print(f"WARNING: Missing ID {element_id}")

    # Overwrite
    try:
        with open(dashboard_path, 'w', encoding='utf-8') as f:
            f.write(glassui_content)
        print(f"Successfully copied '{glassui_path}' to '{dashboard_path}'.")
    except Exception as e:
        print(f"Error overwriting '{dashboard_path}': {e}")
        sys.exit(1)

    # Deploy
    print("Deploying to Hugging Face...")
    git_commands = [
        ["git", "add", "dashboard.html"],
        ["git", "commit", "-m", "UI Update: Apply responsive Glassmorphism design from Stitch"],
        ["git", "push", "space", "clean_deploy:main"]
    ]

    for cmd in git_commands:
        print(f"Running: {' '.join(cmd)}")
        # encoding='utf-8' so git output decodes consistently on Windows.
        result = subprocess.run(cmd, text=True, encoding='utf-8')
        if result.returncode != 0:
            print(f"Command failed: {' '.join(cmd)}")

if __name__ == "__main__":
    main()
diff --git a/final_deploy_push.py b/final_deploy_push.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7a346790c855f278cd154fd99622f4818eef4a2
--- /dev/null
+++ b/final_deploy_push.py
@@ -0,0 +1,20 @@
+import subprocess
+import sys
+
# Force UTF-8 output for Windows terminals
sys.stdout.reconfigure(encoding='utf-8')

def deploy():
    """Force-push the local clean_deploy branch onto the Space's main branch."""
    # Fix: repaired mojibake-corrupted / line-split status strings
    # (the original success message was a syntax error).
    print("Ensure you are inside the D:\\Projects\\StyleSync AI directory before running this!")

    command = "git push --force space clean_deploy:main"
    print(f"\nRunning: {command} ...")

    try:
        subprocess.run(command, check=True, shell=True)
        print("\nSuccessfully pushed to Space!")
    except subprocess.CalledProcessError as e:
        print(f"\nPush failed: {e}")

if __name__ == "__main__":
    deploy()
diff --git a/final_upload.py b/final_upload.py
new file mode 100644
index 0000000000000000000000000000000000000000..eae0ea28ec668c44afef4f19a75744e01fa7179f
--- /dev/null
+++ b/final_upload.py
@@ -0,0 +1,61 @@
+import subprocess
+import sys
+
def run_command(command, check=True):
    """Run *command* through the shell.

    When *check* is True a non-zero exit status is reported and the
    CalledProcessError is re-raised so callers can handle it; when
    False, failures are silently tolerated.
    """
    try:
        subprocess.run(command, check=check, shell=True, text=True)
    except subprocess.CalledProcessError as e:
        # Fix: original message read "Error executing currently:".
        print(f"Error executing command: {command}")
        if check:
            # Re-raise so the caller decides how to recover.
            raise e
+
def main():
    """Initialize a git repo, commit everything, and push it to a GitHub URL.

    The target URL is taken from argv[1] when provided, otherwise from an
    interactive prompt.
    """
    if len(sys.argv) > 1:
        github_url = sys.argv[1].strip()
    else:
        github_url = input('Please paste your GitHub URL here: ').strip()

    if not github_url:
        print("Error: No URL provided.")
        return

    try:
        # Git Commands sequence
        print("Initializing git...")
        run_command("git init")

        print("Adding files...")
        run_command("git add .")

        print("Committing files...")
        try:
            # check=True raises on failure, e.g. when there is nothing to commit.
            run_command('git commit -m "Initial commit - StyleSync AI"', check=True)
        except subprocess.CalledProcessError:
            print("Commit failed (likely nothing to commit). Continuing...")

        print("Renaming branch to main...")
        run_command("git branch -M main")

        print("Removing existing origin (if any)...")
        # check=False because this fails harmlessly when no origin exists.
        run_command("git remote remove origin", check=False)

        print(f"Adding remote origin: {github_url}")
        run_command(f"git remote add origin {github_url}")

        print("Pushing to GitHub...")
        run_command("git push -u origin main")

        # Fix: repaired a mojibake-split success string (syntax-breaking).
        print('Code is live on GitHub!')

    except Exception as e:
        print(f"\nAn error occurred: {e}")

if __name__ == "__main__":
    main()
diff --git a/fix_browse_button.py b/fix_browse_button.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c6d43e8d99bfed44c4863798a0219b956b9cc67
--- /dev/null
+++ b/fix_browse_button.py
@@ -0,0 +1,57 @@
+import sys
+import subprocess
+import re
+
def fix_html():
    """Wire the Browse button to the hidden file input, then deploy.

    Injects a JS snippet into dashboard.html (idempotently) that forwards
    clicks on #browseBtn to #fileInput and shows the chosen filename,
    then commits and force-pushes the file to the Space remote.
    Returns True on success, False if the insertion anchor is missing.
    """
    with open('dashboard.html', 'r', encoding='utf-8') as f:
        content = f.read()

    # JS appended right after the DOM element lookups in dashboard.html.
    js_snippet = """
    const browseBtn = document.getElementById('browseBtn');
    browseBtn.addEventListener('click', (e) => {
        e.preventDefault();
        fileInput.click();
    });
    fileInput.addEventListener('change', () => {
        if (fileInput.files.length > 0) {
            // Provide a visual cue that a file was selected
            const fileName = fileInput.files[0].name;
            browseBtn.innerHTML = `check_circle ${fileName}`;
        }
    });
"""

    # Idempotence guard: both marker strings must be present to skip.
    if "browseBtn.addEventListener('click', (e) => {" in content and "Provide a visual cue that a file was selected" in content:
        print("Snippet already exists. Skipping injection.")
    else:
        # Find where the DOM elements are defined
        target = "const downloadBtn = document.getElementById('downloadBtn');"
        if target in content:
            new_content = content.replace(target, target + "\n" + js_snippet)
            with open('dashboard.html', 'w', encoding='utf-8') as f:
                f.write(new_content)
            print("Injected JS successfully.")
        else:
            print("Could not find insertion point!")
            return False

    # Run git commands: the push to "space" is the one that matters here.
    subprocess.run(['git', 'add', 'dashboard.html'], check=True)
    try:
        subprocess.run(['git', 'commit', '-m', 'Bugfix: Wire up Browse Files button to hidden input'], check=True)
    except subprocess.CalledProcessError:
        print("Nothing to commit")

    subprocess.run(['git', 'push', '--force', 'space', 'HEAD:main'], check=True)

    # Push to origin main as well (best effort).
    try:
        subprocess.run(['git', 'push', 'origin', 'HEAD:main'], check=True)
    except subprocess.CalledProcessError:
        print("Push to origin failed or not needed")

    print("Deployment triggered successfully.")
    return True

if __name__ == '__main__':
    fix_html()
diff --git a/fix_dashboard_api.py b/fix_dashboard_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..274ad87e08285a9ffd3b8bc6a496d0b91b51adb5
--- /dev/null
+++ b/fix_dashboard_api.py
@@ -0,0 +1,66 @@
+import sys
+import subprocess
+import re
+
def patch_dashboard():
    """Swap the demo setTimeout in dashboard.html for a real fetch call.

    Replaces the simulated 1.5s delay in the Start button handler with a
    POST of the selected file to /generate-catalog. Returns True when the
    patch was applied, False if the target block was not found.
    """
    with open('dashboard.html', 'r', encoding='utf-8') as f:
        content = f.read()

    # Locate the setTimeout block inside startBtn.addEventListener and replace it with fetch logic
    pattern = re.compile(r"setTimeout\(\(\) => \{[\s\S]*?\}, 1500\);", re.DOTALL)

    # NOTE(review): the injected code uses `await`, so it assumes the
    # surrounding startBtn click handler is declared async -- confirm.
    new_block = """try {
        const formData = new FormData();
        formData.append('file', fileInput.files[0]);

        const response = await fetch('/generate-catalog', {
            method: 'POST',
            body: formData
        });

        const data = await response.json();
        jsonOutput.textContent = JSON.stringify(data, null, 2);
        isCatalogGenerated = true;
    } catch (error) {
        console.error("Error generating catalog:", error);
    } finally {
        startBtn.innerHTML = 'Start Agent Workflowarrow_forward
';
        startBtn.disabled = false;
        startBtn.classList.add('animate-pulse-slow', 'animate-glow-pulse');
    }"""

    if not pattern.search(content):
        print("Error: Could not find the target setTimeout block in dashboard.html.")
        return False

    new_content = pattern.sub(new_block, content)

    with open('dashboard.html', 'w', encoding='utf-8') as f:
        f.write(new_content)

    print("Successfully patched dashboard.html")
    return True
+
def run_git_commands():
    """Stage, commit, and force-push dashboard.html to the Space remote.

    Failures are reported but do not stop the remaining commands, so a
    "nothing to commit" result still allows the push to run.
    """
    commands = [
        ['git', 'add', 'dashboard.html'],
        ['git', 'commit', '-m', 'Bugfix: Restore real API connection to Glassmorphism UI'],
        ['git', 'push', '--force', 'space', 'HEAD:main']
    ]

    for cmd in commands:
        print(f"Running: {' '.join(cmd)}")
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            # Fix: the original printed a literal backslash-n ("\\n");
            # use a real newline so the stderr is readable.
            print(f"Command failed with {result.returncode}:\n{result.stderr}")
        else:
            print(f"Success!\n{result.stdout}")

if __name__ == '__main__':
    if patch_dashboard():
        run_git_commands()
diff --git a/fix_dashboard_routing.py b/fix_dashboard_routing.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1451fc81ac97f14e06a5387ada14e2d88cfef77
--- /dev/null
+++ b/fix_dashboard_routing.py
@@ -0,0 +1,87 @@
+import os
+import subprocess
+
def main():
    """Overwrite main.py so the dashboard is served at "/", then deploy.

    Rewrites main.py with a FastAPI app that returns dashboard.html from
    the root route (fixing the 404), keeps /dashboard as a backup route,
    exposes the /analyze upload endpoint, and finally commits and pushes
    the change to the Hugging Face Space remote.
    """
    # Define the content for main.py
    main_py_content = """import os
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from agents.visual_analyst import VisualAnalyst
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
app = FastAPI()
# Initialize Agent
visual_agent = VisualAnalyst()
# 1. READ THE DASHBOARD HTML FILE INTO MEMORY
try:
    with open("dashboard.html", "r") as f:
        dashboard_html = f.read()
except FileNotFoundError:
    dashboard_html = "Error: dashboard.html not found. Please ensure the file exists.
"
# 2. SERVE DASHBOARD AT ROOT (Home Page)
@app.get("/", response_class=HTMLResponse)
async def read_root():
    return dashboard_html
# 3. KEEP /dashboard ROUTE AS BACKUP
@app.get("/dashboard", response_class=HTMLResponse)
async def read_dashboard():
    return dashboard_html
@app.post("/analyze")
async def analyze_merch(file: UploadFile = File(...)):
    try:
        os.makedirs("uploads", exist_ok=True)
        file_path = f"uploads/{file.filename}"
        with open(file_path, "wb") as f:
            f.write(await file.read())
        result = await visual_agent.analyze_image(file_path)

        if os.path.exists(file_path):
            os.remove(file_path)

        return JSONResponse(content=result)
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
"""

    # Overwrite main.py
    print("Overwriting main.py...")
    try:
        with open("main.py", "w", encoding="utf-8") as f:
            f.write(main_py_content)
        print("Successfully updated main.py")
    except Exception as e:
        print(f"Error writing main.py: {e}")
        return

    # Define git commands
    git_commands = [
        ["git", "add", "main.py"],
        ["git", "commit", "-m", "Fix dashboard 404 by serving HTML at root"],
        ["git", "push", "space", "clean_deploy:main"]
    ]

    # Run git commands
    print("\nRunning git commands...")
    for cmd in git_commands:
        print(f"Executing: {' '.join(cmd)}")
        try:
            subprocess.run(cmd, check=True)
        except subprocess.CalledProcessError as e:
            print(f"Command failed: {e}")
            # Keep going: a failed commit (e.g. nothing to commit) should
            # not prevent attempting the push of an earlier unpushed commit.
            pass

    print("\nfix_dashboard_routing.py completed.")

if __name__ == "__main__":
    main()
diff --git a/fix_google_key.py b/fix_google_key.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdabb024b499d627225436f7f41e66ecc88d51e3
--- /dev/null
+++ b/fix_google_key.py
@@ -0,0 +1,48 @@
+import os
+import sys
+
# Force UTF-8 output for Windows terminals
sys.stdout.reconfigure(encoding='utf-8')

# 1. Update .env
env_path = ".env"
key = "GOOGLE_API_KEY"
# SECURITY NOTE(review): this API key is hardcoded in source control.
# Rotate the credential and load the value from a secure location
# (environment variable or secret manager) instead of committing it.
value = "AIzaSyDgIkagGBciWNZDTn07OlfY9tVPvo6KJ1on"

print(f"Updating {key} in .env...")

# Preserve every other line in .env; replace the GOOGLE_API_KEY line if present.
lines = []
if os.path.exists(env_path):
    with open(env_path, "r", encoding="utf-8") as f:
        lines = f.readlines()

found = False
new_lines = []
for line in lines:
    if line.startswith(f"{key}="):
        new_lines.append(f"{key}={value}\n")
        found = True
    else:
        new_lines.append(line)

if not found:
    # Make sure the previous last line ends with a newline before appending.
    if new_lines and not new_lines[-1].endswith('\n'):
        new_lines.append('\n')
    new_lines.append(f"{key}={value}\n")

with open(env_path, "w", encoding="utf-8") as f:
    f.writelines(new_lines)

# Fix: repaired mojibake-split status strings (the originals broke the line
# mid-literal, which is a syntax error).
print(f"Updated {key} in .env")

# 2. Upload to Cloud
print("Syncing secrets to Hugging Face Space...")
try:
    # Ensure the project root is importable so upload_secrets resolves.
    sys.path.append(os.getcwd())
    from upload_secrets import upload_secrets

    upload_secrets()
    print("Google Key saved locally and uploaded to Hugging Face!")
except Exception as e:
    print(f"Failed to sync: {e}")
diff --git a/fix_js_syntax.py b/fix_js_syntax.py
new file mode 100644
index 0000000000000000000000000000000000000000..95d0f2eaa378687ade1abf854eea651a17deef13
--- /dev/null
+++ b/fix_js_syntax.py
@@ -0,0 +1,147 @@
+import sys
+import subprocess
+
+def fix_html():
+ with open('dashboard.html', 'r', encoding='utf-8') as f:
+ content = f.read()
+
+ parts = content.split('')
+ if len(parts) < 2:
+ print("Could not find in dashboard.html")
+ return False
+
+ top_part = parts[0] + '\n'
+
+ new_script = """
+