Spaces:
Sleeping
Sleeping
fix: audit fixes per assignment spec
Browse files- .env.example +4 -2
- README.md +5 -4
- agent/nodes.py +4 -3
- agent/tools.py +3 -5
- demo.py +0 -1
.env.example
CHANGED
|
@@ -2,12 +2,14 @@
|
|
| 2 |
# Copy this file to .env and fill in ONE provider key.
|
| 3 |
|
| 4 |
# --- LLM Provider (pick one) ---
|
| 5 |
-
|
|
|
|
| 6 |
# OPENAI_API_KEY=sk-... # GPT-4o-mini
|
| 7 |
# GOOGLE_API_KEY=... # Gemini 1.5 Flash
|
| 8 |
|
| 9 |
# --- Optional Overrides ---
|
| 10 |
-
# LLM_PROVIDER=
|
|
|
|
| 11 |
# ANTHROPIC_MODEL=claude-3-haiku-20240307
|
| 12 |
# OPENAI_MODEL=gpt-4o-mini
|
| 13 |
# GOOGLE_MODEL=gemini-1.5-flash
|
|
|
|
| 2 |
# Copy this file to .env and fill in ONE provider key.
|
| 3 |
|
| 4 |
# --- LLM Provider (pick one) ---
|
| 5 |
+
GROQ_API_KEY=gsk_... # Groq (llama-3.3-70b-versatile, fast + free)
|
| 6 |
+
# ANTHROPIC_API_KEY=sk-ant-... # Claude 3 Haiku
|
| 7 |
# OPENAI_API_KEY=sk-... # GPT-4o-mini
|
| 8 |
# GOOGLE_API_KEY=... # Gemini 1.5 Flash
|
| 9 |
|
| 10 |
# --- Optional Overrides ---
|
| 11 |
+
# LLM_PROVIDER=groq # Force: groq | anthropic | openai | google
|
| 12 |
+
# GROQ_MODEL=llama-3.3-70b-versatile
|
| 13 |
# ANTHROPIC_MODEL=claude-3-haiku-20240307
|
| 14 |
# OPENAI_MODEL=gpt-4o-mini
|
| 15 |
# GOOGLE_MODEL=gemini-1.5-flash
|
README.md
CHANGED
|
@@ -25,7 +25,7 @@ Built for the ServiceHive / Inflx ML Intern assignment: _Social-to-Lead Agentic
|
|
| 25 |
| Lead qualification | Progressive field collection (name → email → platform) |
|
| 26 |
| Tool execution guard | `mock_lead_capture` fires **only** after all 3 fields collected |
|
| 27 |
| State persistence | Full conversation history via LangGraph `AgentState` across turns |
|
| 28 |
-
| Multi-LLM support | Claude 3 Haiku · GPT-4o-mini · Gemini |
|
| 29 |
|
| 30 |
---
|
| 31 |
|
|
@@ -44,9 +44,10 @@ pip install -r requirements.txt
|
|
| 44 |
```bash
|
| 45 |
cp .env.example .env
|
| 46 |
# Edit .env — add ONE of:
|
| 47 |
-
#
|
|
|
|
| 48 |
# OPENAI_API_KEY=sk-... ← GPT-4o-mini
|
| 49 |
-
# GOOGLE_API_KEY=... ← Gemini
|
| 50 |
```
|
| 51 |
|
| 52 |
### 3. Run
|
|
@@ -82,7 +83,7 @@ You: alex@example.com
|
|
| 82 |
Agent: Almost there! Which creator platform are you primarily on?
|
| 83 |
|
| 84 |
You: YouTube
|
| 85 |
-
|
| 86 |
|
| 87 |
Agent: You're all set, Alex! Our team will reach out to alex@example.com shortly.
|
| 88 |
Start your free trial at autostream.io/signup
|
|
|
|
| 25 |
| Lead qualification | Progressive field collection (name → email → platform) |
|
| 26 |
| Tool execution guard | `mock_lead_capture` fires **only** after all 3 fields collected |
|
| 27 |
| State persistence | Full conversation history via LangGraph `AgentState` across turns |
|
| 28 |
+
| Multi-LLM support | Groq llama-3.3-70b (default) · Claude 3 Haiku · GPT-4o-mini · Gemini 1.5 Flash |
|
| 29 |
|
| 30 |
---
|
| 31 |
|
|
|
|
| 44 |
```bash
|
| 45 |
cp .env.example .env
|
| 46 |
# Edit .env — add ONE of:
|
| 47 |
+
# GROQ_API_KEY=gsk_... ← llama-3.3-70b-versatile (default, free + fast)
|
| 48 |
+
# ANTHROPIC_API_KEY=sk-ant-... ← Claude 3 Haiku
|
| 49 |
# OPENAI_API_KEY=sk-... ← GPT-4o-mini
|
| 50 |
+
# GOOGLE_API_KEY=... ← Gemini 1.5 Flash
|
| 51 |
```
|
| 52 |
|
| 53 |
### 3. Run
|
|
|
|
| 83 |
Agent: Almost there! Which creator platform are you primarily on?
|
| 84 |
|
| 85 |
You: YouTube
|
| 86 |
+
Lead captured successfully: Alex Johnson, alex@example.com, YouTube
|
| 87 |
|
| 88 |
Agent: You're all set, Alex! Our team will reach out to alex@example.com shortly.
|
| 89 |
Start your free trial at autostream.io/signup
|
agent/nodes.py
CHANGED
|
@@ -137,12 +137,13 @@ def generate_response(state: AgentState) -> dict:
|
|
| 137 |
intent = state.get("intent", "unknown")
|
| 138 |
collecting = state.get("collecting_lead", False)
|
| 139 |
|
| 140 |
-
# Build KB context block
|
| 141 |
-
|
|
|
|
| 142 |
kb_context = (
|
| 143 |
f"\nKnowledge Base Context:\n{ctx}\n"
|
| 144 |
if ctx
|
| 145 |
-
else "\n[No
|
| 146 |
)
|
| 147 |
|
| 148 |
# Build lead collection status block
|
|
|
|
| 137 |
intent = state.get("intent", "unknown")
|
| 138 |
collecting = state.get("collecting_lead", False)
|
| 139 |
|
| 140 |
+
# Build KB context block — only inject when current turn is an inquiry.
|
| 141 |
+
# Prevents stale context from leaking into greeting / collecting turns.
|
| 142 |
+
ctx = state.get("rag_context", "") if intent == "inquiry" else ""
|
| 143 |
kb_context = (
|
| 144 |
f"\nKnowledge Base Context:\n{ctx}\n"
|
| 145 |
if ctx
|
| 146 |
+
else "\n[No KB context for this turn.]\n"
|
| 147 |
)
|
| 148 |
|
| 149 |
# Build lead collection status block
|
agent/tools.py
CHANGED
|
@@ -1,5 +1,3 @@
|
|
| 1 |
-
def mock_lead_capture(name: str, email: str, platform: str) -> str:
|
| 2 |
-
"""Mock lead capture API
|
| 3 |
-
|
| 4 |
-
print(f"\n[LEAD CAPTURE] {msg}")
|
| 5 |
-
return msg
|
|
|
|
| 1 |
+
def mock_lead_capture(name: str, email: str, platform: str) -> None:
|
| 2 |
+
"""Mock lead capture API — matches assignment spec exactly."""
|
| 3 |
+
print(f"Lead captured successfully: {name}, {email}, {platform}")
|
|
|
|
|
|
demo.py
CHANGED
|
@@ -5,7 +5,6 @@ Run: python demo.py
|
|
| 5 |
"""
|
| 6 |
from __future__ import annotations
|
| 7 |
import time
|
| 8 |
-
import os
|
| 9 |
from dotenv import load_dotenv
|
| 10 |
|
| 11 |
load_dotenv()
|
|
|
|
| 5 |
"""
|
| 6 |
from __future__ import annotations
|
| 7 |
import time
|
|
|
|
| 8 |
from dotenv import load_dotenv
|
| 9 |
|
| 10 |
load_dotenv()
|