DevelopedBy-Siva commited on
Commit
83fe4f9
·
1 Parent(s): 7e6a910

Set up the initial app and deploy

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +10 -0
  2. .env +7 -0
  3. .env.example +7 -0
  4. .github/workflows/deploy.yml +61 -0
  5. Dockerfile +14 -0
  6. README.md +78 -1
  7. backend/__init__.py +1 -0
  8. backend/__pycache__/__init__.cpython-311.pyc +0 -0
  9. backend/__pycache__/__init__.cpython-312.pyc +0 -0
  10. backend/__pycache__/config.cpython-311.pyc +0 -0
  11. backend/__pycache__/config.cpython-312.pyc +0 -0
  12. backend/__pycache__/main.cpython-311.pyc +0 -0
  13. backend/__pycache__/main.cpython-312.pyc +0 -0
  14. backend/ai/__init__.py +1 -0
  15. backend/ai/__pycache__/__init__.cpython-311.pyc +0 -0
  16. backend/ai/__pycache__/__init__.cpython-312.pyc +0 -0
  17. backend/ai/__pycache__/analyzer.cpython-311.pyc +0 -0
  18. backend/ai/__pycache__/analyzer.cpython-312.pyc +0 -0
  19. backend/ai/__pycache__/classifier.cpython-311.pyc +0 -0
  20. backend/ai/__pycache__/classifier.cpython-312.pyc +0 -0
  21. backend/ai/__pycache__/client.cpython-311.pyc +0 -0
  22. backend/ai/__pycache__/client.cpython-312.pyc +0 -0
  23. backend/ai/__pycache__/composer.cpython-311.pyc +0 -0
  24. backend/ai/__pycache__/composer.cpython-312.pyc +0 -0
  25. backend/ai/__pycache__/extractor.cpython-311.pyc +0 -0
  26. backend/ai/__pycache__/extractor.cpython-312.pyc +0 -0
  27. backend/ai/__pycache__/prompts.cpython-311.pyc +0 -0
  28. backend/ai/__pycache__/prompts.cpython-312.pyc +0 -0
  29. backend/ai/__pycache__/workflow_builder.cpython-311.pyc +0 -0
  30. backend/ai/__pycache__/workflow_builder.cpython-312.pyc +0 -0
  31. backend/ai/__pycache__/workflow_suggester.cpython-311.pyc +0 -0
  32. backend/ai/__pycache__/workflow_suggester.cpython-312.pyc +0 -0
  33. backend/ai/analyzer.py +113 -0
  34. backend/ai/classifier.py +27 -0
  35. backend/ai/client.py +80 -0
  36. backend/ai/composer.py +28 -0
  37. backend/ai/extractor.py +29 -0
  38. backend/ai/prompts.py +313 -0
  39. backend/ai/workflow_builder.py +142 -0
  40. backend/ai/workflow_suggester.py +72 -0
  41. backend/api/__init__.py +1 -0
  42. backend/api/__pycache__/__init__.cpython-311.pyc +0 -0
  43. backend/api/__pycache__/__init__.cpython-312.pyc +0 -0
  44. backend/api/__pycache__/middleware.cpython-312.pyc +0 -0
  45. backend/api/__pycache__/routes.cpython-311.pyc +0 -0
  46. backend/api/__pycache__/routes.cpython-312.pyc +0 -0
  47. backend/api/middleware.py +6 -0
  48. backend/api/routes.py +121 -0
  49. backend/config.py +32 -0
  50. backend/engine/__init__.py +5 -0
.dockerignore ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ .git
2
+ .github
3
+ .pytest_cache
4
+ __pycache__
5
+ *.pyc
6
+ .env
7
+ .env.example
8
+ extension
9
+ tests
10
+ flowpilot-vertex-key.json
.env ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ AI_PROVIDER=vertex_ai
2
+ ALLOW_ORIGINS=*
3
+ GMAIL_POLL_SECONDS=30
4
+ VERTEX_PROJECT_ID=flow-pilot-493104
5
+ VERTEX_LOCATION=us-central1
6
+ VERTEX_MODEL=gemini-2.5-pro
7
+ GOOGLE_APPLICATION_CREDENTIALS=
.env.example ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ AI_PROVIDER=vertex_ai
2
+ ALLOW_ORIGINS=*
3
+ GMAIL_POLL_SECONDS=30
4
+ VERTEX_PROJECT_ID=your-gcp-project-id
5
+ VERTEX_LOCATION=us-central1
6
+ VERTEX_MODEL=gemini-2.5-pro
7
+ GOOGLE_APPLICATION_CREDENTIALS=
.github/workflows/deploy.yml ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Deploy Backend
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ workflow_dispatch:
8
+
9
+ env:
10
+ PROJECT_ID: flow-pilot-493104
11
+ PROJECT_NUMBER: "707525727191"
12
+ REGION: us-central1
13
+ SERVICE: flowpilot-backend
14
+ REPOSITORY: flowpilot
15
+ IMAGE: backend
16
+ WIF_PROVIDER: projects/707525727191/locations/global/workloadIdentityPools/github-pool/providers/github-provider
17
+ SERVICE_ACCOUNT: github-deployer@flow-pilot-493104.iam.gserviceaccount.com
18
+
19
+ jobs:
20
+ deploy:
21
+ runs-on: ubuntu-latest
22
+ permissions:
23
+ contents: read
24
+ id-token: write
25
+
26
+ steps:
27
+ - name: Checkout
28
+ uses: actions/checkout@v4
29
+
30
+ - name: Authenticate to Google Cloud
31
+ uses: google-github-actions/auth@v3
32
+ with:
33
+ workload_identity_provider: ${{ env.WIF_PROVIDER }}
34
+ service_account: ${{ env.SERVICE_ACCOUNT }}
35
+
36
+ - name: Setup gcloud
37
+ uses: google-github-actions/setup-gcloud@v2
38
+
39
+ - name: Configure Docker auth
40
+ run: gcloud auth configure-docker ${{ env.REGION }}-docker.pkg.dev --quiet
41
+
42
+ - name: Build image
43
+ run: |
44
+ docker build \
45
+ -t ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.REPOSITORY }}/${{ env.IMAGE }}:${{ github.sha }} \
46
+ .
47
+
48
+ - name: Push image
49
+ run: |
50
+ docker push \
51
+ ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.REPOSITORY }}/${{ env.IMAGE }}:${{ github.sha }}
52
+
53
+ - name: Deploy to Cloud Run
54
+ uses: google-github-actions/deploy-cloudrun@v3
55
+ with:
56
+ service: ${{ env.SERVICE }}
57
+ region: ${{ env.REGION }}
58
+ image: ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.REPOSITORY }}/${{ env.IMAGE }}:${{ github.sha }}
59
+ flags: >-
60
+ --allow-unauthenticated
61
+ --set-env-vars=AI_PROVIDER=vertex_ai,VERTEX_PROJECT_ID=${{ env.PROJECT_ID }},VERTEX_LOCATION=${{ env.REGION }},VERTEX_MODEL=gemini-2.5-pro
Dockerfile ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.12-slim
2
+
3
+ ENV PYTHONDONTWRITEBYTECODE=1
4
+ ENV PYTHONUNBUFFERED=1
5
+ ENV PORT=8080
6
+
7
+ WORKDIR /app
8
+
9
+ COPY backend/requirements.txt /app/backend/requirements.txt
10
+ RUN pip install --no-cache-dir -r /app/backend/requirements.txt
11
+
12
+ COPY backend /app/backend
13
+
14
+ CMD ["sh", "-c", "uvicorn backend.main:app --host 0.0.0.0 --port ${PORT:-8080}"]
README.md CHANGED
@@ -1 +1,78 @@
1
- # flow-pilot
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # FlowPilot
2
+
3
+ FlowPilot is a Gmail-first automation layer for small business owners. The current scaffold includes:
4
+
5
+ - A FastAPI backend with analysis, workflow suggestion, workflow build, deploy, upload, status, and escalation endpoints.
6
+ - A primitive-based workflow engine that compiles and executes JSON workflows.
7
+ - A Chrome extension scaffold that injects a sidebar into Gmail and walks through the onboarding-to-live journey.
8
+ - Lightweight in-memory storage and tests for the core owner flow.
9
+
10
+ ## Project Structure
11
+
12
+ ```text
13
+ backend/
14
+ extension/
15
+ tests/
16
+ ```
17
+
18
+ ## Backend
19
+
20
+ Run locally:
21
+
22
+ ```bash
23
+ pip install -r backend/requirements.txt
24
+ uvicorn backend.main:app --reload
25
+ ```
26
+
27
+ Create a `.env` in the repo root first. FlowPilot is now wired to read Vertex AI settings from that file.
28
+
29
+ ```bash
30
+ cp .env.example .env
31
+ ```
32
+
33
+ ## Extension
34
+
35
+ Load `extension/` as an unpacked Chrome extension. The sidebar injects into Gmail and points to `http://localhost:8000/api`.
36
+
37
+ ## Deploying
38
+
39
+ The repo includes a Cloud Run GitHub Actions workflow at [`.github/workflows/deploy.yml`](.github/workflows/deploy.yml).
40
+
41
+ Before the action will work, finish these one-time Google Cloud steps:
42
+
43
+ ```bash
44
+ gcloud iam workload-identity-pools create github-pool \
45
+ --project=flow-pilot-493104 \
46
+ --location=global \
47
+ --display-name="GitHub Pool"
48
+ ```
49
+
50
+ ```bash
51
+ gcloud iam workload-identity-pools providers create-oidc github-provider \
52
+ --project=flow-pilot-493104 \
53
+ --location=global \
54
+ --workload-identity-pool=github-pool \
55
+ --display-name="GitHub Provider" \
56
+ --issuer-uri="https://token.actions.githubusercontent.com" \
57
+ --attribute-mapping="google.subject=assertion.sub,attribute.repository=assertion.repository,attribute.actor=assertion.actor"
58
+ ```
59
+
60
+ ```bash
61
+ gcloud iam service-accounts add-iam-policy-binding \
62
+ github-deployer@flow-pilot-493104.iam.gserviceaccount.com \
63
+ --project=flow-pilot-493104 \
64
+ --role="roles/iam.workloadIdentityUser" \
65
+ --member="principalSet://iam.googleapis.com/projects/707525727191/locations/global/workloadIdentityPools/github-pool/attribute.repository/DevelopedBy-Siva/flow-pilot"
66
+ ```
67
+
68
+ The workflow builds the Docker image, pushes it to Artifact Registry, and deploys the FastAPI backend to Cloud Run.
69
+
70
+ ## Notes
71
+
72
+ - Backend storage is currently in-memory for demo speed; the folder layout is ready for SQLite migration work.
73
+ - AI services are still mocked for workflow behavior, but config is now set up for Vertex AI credentials and model selection.
74
+ - Fill in your real Vertex values in `.env`, especially `VERTEX_PROJECT_ID` and `VERTEX_LOCATION`.
75
+ - If you authenticate with `gcloud auth application-default login`, leave `GOOGLE_APPLICATION_CREDENTIALS` blank.
76
+ - Only set `GOOGLE_APPLICATION_CREDENTIALS` when you have a real service account JSON path available.
77
+ - The backend now attempts live Vertex AI calls through the `google-genai` SDK when credentials are configured.
78
+ - If the SDK is missing or `VERTEX_PROJECT_ID` is still a placeholder, FlowPilot falls back to local deterministic mock logic so tests and scaffolding still work.
backend/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """FlowPilot backend package."""
backend/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (207 Bytes). View file
 
backend/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (157 Bytes). View file
 
backend/__pycache__/config.cpython-311.pyc ADDED
Binary file (2.8 kB). View file
 
backend/__pycache__/config.cpython-312.pyc ADDED
Binary file (2.32 kB). View file
 
backend/__pycache__/main.cpython-311.pyc ADDED
Binary file (1.76 kB). View file
 
backend/__pycache__/main.cpython-312.pyc ADDED
Binary file (1.55 kB). View file
 
backend/ai/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """AI helpers."""
backend/ai/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (195 Bytes). View file
 
backend/ai/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (145 Bytes). View file
 
backend/ai/__pycache__/analyzer.cpython-311.pyc ADDED
Binary file (4.25 kB). View file
 
backend/ai/__pycache__/analyzer.cpython-312.pyc ADDED
Binary file (3.82 kB). View file
 
backend/ai/__pycache__/classifier.cpython-311.pyc ADDED
Binary file (1.43 kB). View file
 
backend/ai/__pycache__/classifier.cpython-312.pyc ADDED
Binary file (1.26 kB). View file
 
backend/ai/__pycache__/client.cpython-311.pyc ADDED
Binary file (5.01 kB). View file
 
backend/ai/__pycache__/client.cpython-312.pyc ADDED
Binary file (4.49 kB). View file
 
backend/ai/__pycache__/composer.cpython-311.pyc ADDED
Binary file (1.99 kB). View file
 
backend/ai/__pycache__/composer.cpython-312.pyc ADDED
Binary file (1.74 kB). View file
 
backend/ai/__pycache__/extractor.cpython-311.pyc ADDED
Binary file (1.7 kB). View file
 
backend/ai/__pycache__/extractor.cpython-312.pyc ADDED
Binary file (1.52 kB). View file
 
backend/ai/__pycache__/prompts.cpython-311.pyc ADDED
Binary file (8.73 kB). View file
 
backend/ai/__pycache__/prompts.cpython-312.pyc ADDED
Binary file (8.61 kB). View file
 
backend/ai/__pycache__/workflow_builder.cpython-311.pyc ADDED
Binary file (4.86 kB). View file
 
backend/ai/__pycache__/workflow_builder.cpython-312.pyc ADDED
Binary file (4.36 kB). View file
 
backend/ai/__pycache__/workflow_suggester.cpython-311.pyc ADDED
Binary file (2.51 kB). View file
 
backend/ai/__pycache__/workflow_suggester.cpython-312.pyc ADDED
Binary file (2.18 kB). View file
 
backend/ai/analyzer.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from backend.ai.client import render_prompt, vertex_client
4
+ from backend.ai.prompts import ANALYZE_PROMPT, CUSTOM_TASK_PROMPT
5
+ from backend.models.schemas import TaskCategory
6
+
7
+
8
+ def analyze_business_description(description: str) -> dict:
9
+ if vertex_client.is_ready():
10
+ return vertex_client.generate_json(render_prompt(ANALYZE_PROMPT, description=description))
11
+
12
+ lower = description.lower()
13
+ fully_automatable = []
14
+ ai_assisted = []
15
+ manual = []
16
+ if "order" in lower:
17
+ fully_automatable.append(
18
+ {
19
+ "id": "task_order_processing",
20
+ "name": "Order processing",
21
+ "description": "Read order emails, check stock, log rows, and reply.",
22
+ "time_estimate": "10-15 hours/week",
23
+ "pain_level": "high",
24
+ }
25
+ )
26
+ if "friday" in lower or "weekly" in lower:
27
+ fully_automatable.append(
28
+ {
29
+ "id": "task_weekly_summary",
30
+ "name": "Weekly order summary report",
31
+ "description": "Compile the week's orders and send a summary.",
32
+ "time_estimate": "1-2 hours/week",
33
+ "pain_level": "medium",
34
+ }
35
+ )
36
+ ai_assisted.append(
37
+ {
38
+ "id": "task_availability_inquiries",
39
+ "name": "Availability inquiries",
40
+ "description": "Draft replies using current stock and pricing.",
41
+ "time_estimate": "2-3 hours/week",
42
+ "pain_level": "medium",
43
+ }
44
+ )
45
+ ai_assisted.append(
46
+ {
47
+ "id": "task_low_inventory_alerts",
48
+ "name": "Low inventory alerts",
49
+ "description": "Watch for thin inventory and propose next steps.",
50
+ "time_estimate": "30 minutes/week",
51
+ "pain_level": "medium",
52
+ }
53
+ )
54
+ manual.append(
55
+ {
56
+ "id": "task_special_requests",
57
+ "name": "Custom and relationship-sensitive requests",
58
+ "description": "Gift boxes, delivery arrangements, and VIP relationship notes.",
59
+ "reason_manual": "These need human judgment, pricing discretion, and a personal touch.",
60
+ }
61
+ )
62
+ return {
63
+ "business_type": "Email-driven small business",
64
+ "summary": "FlowPilot found repetitive inbox work that maps well to Gmail, Sheets, and scheduled automations.",
65
+ "tasks": {
66
+ "fully_automatable": fully_automatable,
67
+ "ai_assisted": ai_assisted,
68
+ "manual": manual,
69
+ },
70
+ }
71
+
72
+
73
def analyze_custom_task(business_description: str, existing_workflows: list[dict], custom_task: str) -> dict:
    """Assess whether a free-form custom task is feasible with the primitives.

    Uses Vertex AI when configured; otherwise applies a keyword heuristic so
    the scaffold behaves deterministically without credentials.
    """
    primitives = [
        "email_received",
        "schedule",
        "file_uploaded",
        "manual_trigger",
        "ai_extract",
        "ai_classify",
        "ai_compose",
        "sheets_read",
        "sheets_write",
        "sheets_update",
        "send_email",
        "notify_owner",
        "condition",
        "loop",
        "wait_for_input",
    ]
    if vertex_client.is_ready():
        prompt = render_prompt(
            CUSTOM_TASK_PROMPT,
            business_description=business_description,
            existing_workflows=existing_workflows,
            primitives_list=primitives,
            custom_task=custom_task,
        )
        return vertex_client.generate_json(prompt)

    task_text = custom_task.lower()
    feasible = any(keyword in task_text for keyword in ("flag", "priority", "sheet", "email"))
    return {
        "feasible": feasible,
        "category": TaskCategory.FULLY_AUTOMATABLE if feasible else TaskCategory.MANUAL,
        "task_name": "Restaurant priority handling",
        "task_description": custom_task,
        "missing_capabilities": [] if feasible else ["A matching primitive for this request is missing."],
        "reason_if_manual": None if feasible else "This task needs a capability outside the current primitives.",
        "time_estimate": "1 hour/week",
        "pain_level": "medium",
    }
backend/ai/classifier.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from backend.ai.client import render_prompt, vertex_client
2
+ from backend.ai.prompts import CLASSIFY_PROMPT
3
+
4
+
5
def classify_email(from_email: str, subject: str, body: str, business_context: str) -> dict:
    """Classify an inbound email into a category with priority and sentiment.

    Falls back to keyword/sender heuristics when Vertex AI is not configured.
    """
    if vertex_client.is_ready():
        prompt = render_prompt(
            CLASSIFY_PROMPT,
            categories="order, availability_inquiry, complaint, greeting, other",
            from_email=from_email,
            subject=subject,
            body=body,
            business_context=business_context,
        )
        return vertex_client.generate_json(prompt)

    haystack = f"{subject} {body}".lower()
    if "order" in haystack:
        label = "order"
    elif "available" in haystack:
        label = "availability_inquiry"
    else:
        label = "other"
    # Restaurant senders are treated as higher priority in the mock path.
    urgency = "high" if "restaurant" in from_email.lower() else "medium"
    return {
        "category": label,
        "priority": urgency,
        "requires_response": True,
        "sentiment": "neutral",
        "reasoning": "Derived from keywords and sender profile.",
    }
backend/ai/client.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import os
5
+ from typing import Any
6
+
7
+ from backend.config import get_settings
8
+
9
+ try:
10
+ from google import genai
11
+ except ImportError: # pragma: no cover - depends on optional package
12
+ genai = None
13
+
14
+
15
class VertexAIClient:
    """Lazily-initialized wrapper around the google-genai Vertex AI client.

    Reads configuration from application settings and exposes text/JSON
    generation helpers. ``is_ready`` gates callers so they can fall back to
    local mock logic when the SDK or credentials are unavailable.
    """

    def __init__(self) -> None:
        self.settings = get_settings()
        self._client = None  # created on first use by _get_client()

    def generate_json(self, prompt: str) -> dict[str, Any]:
        """Generate a response for *prompt* and parse the JSON object in it."""
        return _extract_json_object(self.generate_text(prompt))

    def generate_text(self, prompt: str) -> str:
        """Send *prompt* to the configured model and return the stripped text.

        Raises:
            RuntimeError: if the model returns no text.
        """
        response = self._get_client().models.generate_content(
            model=self.settings.vertex_model,
            contents=prompt,
        )
        text = getattr(response, "text", None)
        if not text:
            raise RuntimeError("Vertex AI returned an empty response")
        return text.strip()

    def is_ready(self) -> bool:
        """Return True when live Vertex AI calls can be attempted."""
        if self.settings.ai_provider != "vertex_ai" or genai is None:
            return False
        # A template project id means .env.example was copied but never filled in.
        if self.settings.vertex_project_id in {"your-gcp-project-id"}:
            return False
        credentials_path = self.settings.google_application_credentials.strip()
        # An explicit, non-placeholder key path must actually exist on disk;
        # otherwise assume application-default credentials are in effect.
        if credentials_path and credentials_path != "/absolute/path/to/service-account.json":
            return os.path.exists(credentials_path)
        return True

    def _get_client(self):
        """Create (once) and return the underlying genai client.

        Raises:
            RuntimeError: if the optional google-genai package is missing.
        """
        if genai is None:
            raise RuntimeError("google-genai is not installed")
        if self._client is None:
            self._client = genai.Client(
                vertexai=True,
                project=self.settings.vertex_project_id,
                location=self.settings.vertex_location,
            )
        return self._client
+
59
+
60
+ def _extract_json_object(text: str) -> dict[str, Any]:
61
+ cleaned = text.strip()
62
+ if cleaned.startswith("```"):
63
+ parts = cleaned.split("```")
64
+ cleaned = next((part for part in parts if "{" in part and "}" in part), cleaned)
65
+ cleaned = cleaned.replace("json", "", 1).strip()
66
+ start = cleaned.find("{")
67
+ end = cleaned.rfind("}")
68
+ if start == -1 or end == -1 or end < start:
69
+ raise ValueError("No JSON object found in model response")
70
+ return json.loads(cleaned[start : end + 1])
71
+
72
+
73
+ vertex_client = VertexAIClient()
74
+
75
+
76
def render_prompt(template: str, **kwargs: Any) -> str:
    """Fill every ``{key}`` placeholder in *template* with the stringified value.

    Replaces all occurrences of each placeholder; unknown placeholders are
    left untouched.
    """
    result = template
    for name, replacement in kwargs.items():
        result = result.replace("{" + name + "}", str(replacement))
    return result
backend/ai/composer.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ from backend.ai.client import render_prompt, vertex_client
4
+ from backend.ai.prompts import COMPOSE_PROMPT
5
+
6
+
7
def compose_reply(context: dict, tone: str = "friendly") -> str:
    """Compose an email reply body for the given order/request *context*.

    Uses Vertex AI when configured; otherwise returns a deterministic template
    so the scaffold works without credentials.
    """
    if vertex_client.is_ready():
        serialized = json.dumps(context, indent=2)
        prompt = render_prompt(
            COMPOSE_PROMPT,
            business_name=context.get("business_name", "FlowPilot customer"),
            business_type=context.get("business_type", "small business"),
            owner_name=context.get("owner_name", "Raj"),
            tone=tone,
            context=serialized,
            data=serialized,
        )
        return vertex_client.generate_text(prompt)

    recipient = context.get("customer_name", "there")
    line_items = context.get("items", [])
    summary = ", ".join(f"{entry['quantity']} x {entry['name']}" for entry in line_items)
    if not summary:
        summary = "your request"
    return (
        f"Hi {recipient},\n\n"
        f"Thanks for your order. We have {summary} ready and will follow up with pickup details shortly.\n\n"
        "Raj"
    )
backend/ai/extractor.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from backend.ai.client import render_prompt, vertex_client
2
+ from backend.ai.prompts import EXTRACT_PROMPT
3
+
4
+
5
def extract_email_data(from_email: str, subject: str, body: str) -> dict:
    """Extract structured order/request details from an email.

    Uses Vertex AI when configured; otherwise applies a keyword heuristic that
    produces a deterministic payload for tests and demos.
    """
    if vertex_client.is_ready():
        prompt = render_prompt(
            EXTRACT_PROMPT,
            from_email=from_email,
            subject=subject,
            body=body,
        )
        return vertex_client.generate_json(prompt)

    looks_like_order = "order" in subject.lower() or "order" in body.lower()
    # Derive a display name from the address local part (e.g. "jane.doe" -> "Jane Doe").
    sender_name = from_email.split("@")[0].replace(".", " ").title()
    return {
        "is_order": looks_like_order,
        "message_type": "order" if looks_like_order else "question",
        "confidence": 0.91 if looks_like_order else 0.72,
        "customer_name": sender_name,
        "customer_email": from_email,
        "items": [{"name": "Honeycrisp apples", "quantity": 2}] if looks_like_order else [],
        "pickup_date": "next available pickup",
        "special_requests": "",
        "needs_clarification": not looks_like_order,
        "clarification_question": "" if looks_like_order else "Can you confirm the items and quantities?",
        "suggested_response": "Thanks for the note. I can help with that.",
    }
backend/ai/prompts.py ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ANALYZE_PROMPT = """
2
+ You are a business process analyst for small businesses.
3
+
4
+ The owner described their business and daily operations below.
5
+ Analyze it and categorize every task/operation into exactly
6
+ three categories:
7
+
8
+ 1. FULLY_AUTOMATABLE - tasks that can run without human
9
+ involvement using these capabilities:
10
+ - Reading and understanding emails
11
+ - Extracting structured data from text
12
+ - Reading/writing Google Sheets
13
+ - Sending email replies
14
+ - Running on a schedule
15
+ - Processing uploaded files (CSV/Excel/JSON)
16
+
17
+ 2. AI_ASSISTED - tasks where AI can do the heavy lifting
18
+ but a human should review or approve before action is taken.
19
+ This includes anything with ambiguity, financial decisions,
20
+ or relationship sensitivity.
21
+
22
+ 3. MANUAL - tasks that require human creativity, judgment,
23
+ physical presence, or personal touch. Explain WHY each
24
+ manual task can't be automated.
25
+
26
+ For each task also provide:
27
+ - time_estimate: how much time this task takes per week
28
+ - pain_level: low/medium/high
29
+ - description: one line explaining the task
30
+
31
+ Respond in this exact JSON format:
32
+ {
33
+ "business_type": "...",
34
+ "summary": "...",
35
+ "tasks": {
36
+ "fully_automatable": [
37
+ {
38
+ "id": "task_1",
39
+ "name": "...",
40
+ "description": "...",
41
+ "time_estimate": "...",
42
+ "pain_level": "high"
43
+ }
44
+ ],
45
+ "ai_assisted": [...],
46
+ "manual": [
47
+ {
48
+ "id": "task_5",
49
+ "name": "...",
50
+ "description": "...",
51
+ "reason_manual": "..."
52
+ }
53
+ ]
54
+ }
55
+ }
56
+
57
+ No markdown. No explanation. JSON only.
58
+
59
+ Owner's description:
60
+ {description}
61
+ """.strip()
62
+
63
+ SUGGEST_WORKFLOWS_PROMPT = """
64
+ You are a workflow architect for small business automation.
65
+
66
+ The owner selected this task to automate:
67
+ Task: {task_name}
68
+ Description: {task_description}
69
+ Category: {category}
70
+
71
+ Available primitives for building workflows:
72
+ TRIGGERS: email_received, schedule, file_uploaded, manual_trigger
73
+ AI ACTIONS: ai_extract, ai_classify, ai_compose
74
+ DATA ACTIONS: sheets_read, sheets_write, sheets_update
75
+ COMM ACTIONS: send_email, notify_owner
76
+ LOGIC: condition, loop, wait_for_input
77
+
78
+ The owner's tools:
79
+ - Gmail (for email)
80
+ - Google Sheets (spreadsheet: {spreadsheet_info})
81
+ - Uploaded data files: {uploaded_files}
82
+
83
+ Generate exactly 3 workflow options with DIFFERENT levels
84
+ of automation and human involvement:
85
+
86
+ Option A - Maximum automation (owner does nothing)
87
+ Option B - Balanced (AI handles most, owner approves key steps)
88
+ Option C - Minimal (AI organizes/prepares, owner acts)
89
+
90
+ Respond in this exact JSON format:
91
+ {
92
+ "options": [
93
+ {
94
+ "id": "A",
95
+ "name": "...",
96
+ "approach": "full_auto | balanced | minimal",
97
+ "steps_summary": [
98
+ "Step 1: ...",
99
+ "Step 2: ...",
100
+ "Step 3: ..."
101
+ ],
102
+ "owner_involvement": "what the owner does (or 'nothing')",
103
+ "time_saved": "estimated time saved per week",
104
+ "risk_note": "what could go wrong"
105
+ }
106
+ ]
107
+ }
108
+
109
+ No markdown. JSON only.
110
+ """.strip()
111
+
112
+ BUILD_WORKFLOW_PROMPT = """
113
+ You are a workflow compiler. Convert the selected workflow
114
+ option into an executable workflow JSON.
115
+
116
+ Selected option:
117
+ {selected_option}
118
+
119
+ Task: {task_name}
120
+ Owner config:
121
+ - email: {owner_email}
122
+ - spreadsheet_id: {spreadsheet_id}
123
+ - inventory_sheet: {inventory_sheet}
124
+ - orders_sheet: {orders_sheet}
125
+ - item_column: {item_column}
126
+ - stock_column: {stock_column}
127
+ - uploaded data: {uploaded_data_summary}
128
+
129
+ AVAILABLE PRIMITIVES (you can ONLY use these):
130
+
131
+ TRIGGERS:
132
+ email_received - fires when a new email arrives
133
+ schedule - fires on a cron schedule
134
+ file_uploaded - fires when owner uploads a file
135
+ manual_trigger - fires when owner clicks "run now"
136
+
137
+ AI ACTIONS:
138
+ ai_extract - send text to AI, get structured JSON back
139
+ params: input, extract_fields, output_var
140
+ ai_classify - send text to AI, get category back
141
+ params: input, categories, output_var
142
+ ai_compose - send context to AI, get composed text back
143
+ params: context, tone, output_var
144
+
145
+ DATA ACTIONS:
146
+ sheets_read - read rows from a Google Sheet
147
+ params: spreadsheet_id, sheet_name, filter (optional), output_var
148
+ sheets_write - append a new row to a sheet
149
+ params: spreadsheet_id, sheet_name, data
150
+ sheets_update - update existing cells
151
+ params: spreadsheet_id, sheet_name, lookup_column,
152
+ lookup_value, update_column, update_value
153
+
154
+ COMM ACTIONS:
155
+ send_email - send an email via Gmail
156
+ params: to, subject, body
157
+ notify_owner - send alert to owner's extension sidebar
158
+ params: message, severity (info/warning/critical),
159
+ options (optional - clickable choices)
160
+
161
+ LOGIC:
162
+ condition - if/else branching
163
+ params: check, if_true (step_id), if_false (step_id)
164
+ loop - iterate over a list
165
+ params: items, step_id (executed for each item)
166
+ wait_for_input - pause workflow until owner responds
167
+ params: prompt, options, output_var
168
+
169
+ VARIABLE REFERENCES:
170
+ Use {{step_id.output_var}} to reference output from
171
+ a previous step.
172
+ Use {{trigger.email.from}}, {{trigger.email.subject}},
173
+ {{trigger.email.body}} for email trigger data.
174
+ Use {{config.xxx}} for owner config values.
175
+
176
+ Generate a valid workflow JSON following this exact schema:
177
+ {
178
+ "name": "...",
179
+ "description": "...",
180
+ "trigger": {
181
+ "type": "email_received | schedule | file_uploaded | manual_trigger",
182
+ "config": {}
183
+ },
184
+ "steps": [
185
+ {
186
+ "id": "step_1",
187
+ "action": "primitive_name",
188
+ "params": {},
189
+ "on_error": "notify_owner | skip | abort"
190
+ }
191
+ ]
192
+ }
193
+
194
+ Rules:
195
+ - Every step must have a unique id
196
+ - Every variable reference must point to a step that
197
+ runs BEFORE the current step
198
+ - If a task needs a primitive that doesn't exist in the
199
+ list above, DO NOT include it. Add a notify_owner step
200
+ explaining what needs manual handling.
201
+ - Include on_error for every step
202
+
203
+ No markdown. No explanation. Valid JSON only.
204
+ """.strip()
205
+
206
+ EXTRACT_PROMPT = """
207
+ Extract order/request details from this email.
208
+
209
+ Return JSON:
210
+ {
211
+ "is_order": true,
212
+ "message_type": "order | question | complaint | greeting | other",
213
+ "confidence": 0.0,
214
+ "customer_name": "...",
215
+ "customer_email": "...",
216
+ "items": [{"name": "...", "quantity": 1}],
217
+ "pickup_date": "...",
218
+ "special_requests": "...",
219
+ "needs_clarification": false,
220
+ "clarification_question": "...",
221
+ "suggested_response": "..."
222
+ }
223
+
224
+ If confidence < 0.7, set needs_clarification: true.
225
+ If not an order, set is_order: false and provide
226
+ message_type and suggested_response.
227
+
228
+ Email:
229
+ From: {from_email}
230
+ Subject: {subject}
231
+ Body: {body}
232
+ """.strip()
233
+
234
+ CLASSIFY_PROMPT = """
235
+ Classify this email into one of these categories:
236
+ {categories}
237
+
238
+ Also determine:
239
+ - priority: low/medium/high/critical
240
+ - requires_response: true/false
241
+ - sentiment: positive/neutral/negative
242
+
243
+ Email:
244
+ From: {from_email}
245
+ Subject: {subject}
246
+ Body: {body}
247
+
248
+ Context about the business:
249
+ {business_context}
250
+
251
+ Respond in JSON only:
252
+ {
253
+ "category": "...",
254
+ "priority": "...",
255
+ "requires_response": true,
256
+ "sentiment": "...",
257
+ "reasoning": "..."
258
+ }
259
+ """.strip()
260
+
261
+ COMPOSE_PROMPT = """
262
+ Write an email reply for this business context.
263
+
264
+ Business: {business_name} ({business_type})
265
+ Owner: {owner_name}
266
+ Tone: {tone}
267
+
268
+ Context:
269
+ {context}
270
+
271
+ Data available:
272
+ {data}
273
+
274
+ Rules:
275
+ - Keep it short and warm
276
+ - Include all relevant details (items, quantities,
277
+ prices, dates)
278
+ - Match the tone specified
279
+ - Do not include a subject line
280
+ - Do not include "Dear" or overly formal greetings
281
+ - Sign off with the owner's name
282
+
283
+ Write the email body only.
284
+ """.strip()
285
+
286
+ CUSTOM_TASK_PROMPT = """
287
+ The business owner described a custom task they want handled.
288
+
289
+ Their business: {business_description}
290
+ Their existing workflows: {existing_workflows}
291
+ Available primitives: {primitives_list}
292
+
293
+ Custom task description:
294
+ {custom_task}
295
+
296
+ First determine:
297
+ 1. Can this task be accomplished using ONLY the
298
+ available primitives?
299
+ 2. If yes, categorize it: fully_automatable or ai_assisted
300
+ 3. If no, explain what's missing and categorize as manual
301
+
302
+ Respond in JSON:
303
+ {
304
+ "feasible": true,
305
+ "category": "fully_automatable | ai_assisted | manual",
306
+ "task_name": "short name",
307
+ "task_description": "one line",
308
+ "missing_capabilities": [],
309
+ "reason_if_manual": null,
310
+ "time_estimate": "...",
311
+ "pain_level": "low/medium/high"
312
+ }
313
+ """.strip()
backend/ai/workflow_builder.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ from backend.ai.client import render_prompt, vertex_client
4
+ from backend.ai.prompts import BUILD_WORKFLOW_PROMPT
5
+ from backend.models.schemas import BuildWorkflowRequest, WorkflowDefinition, WorkflowStep
6
+
7
+
8
def build_workflow_definition(request: BuildWorkflowRequest, owner: dict) -> dict:
    """Produce a workflow definition dict for the option the owner selected.

    When the Vertex AI client is configured, the definition is generated by
    the model from BUILD_WORKFLOW_PROMPT.  Otherwise a deterministic fallback
    template is used: an email-triggered order workflow, or a scheduled
    weekly-summary workflow when the task name mentions "summary".

    Args:
        request: The selected option plus task name/description.
        owner: Owner record supplying email, spreadsheet config, and tone.

    Returns:
        A dict shaped like WorkflowDefinition (via ``model_dump()``).
    """
    if vertex_client.is_ready():
        prompt = render_prompt(
            BUILD_WORKFLOW_PROMPT,
            selected_option=json.dumps(request.selected_option, indent=2),
            task_name=request.task_name,
            owner_email=owner.get("email", "owner@example.com"),
            spreadsheet_id=owner.get("spreadsheet_id", ""),
            inventory_sheet=owner.get("spreadsheet_config", {}).get("inventory_sheet", "Inventory"),
            orders_sheet=owner.get("spreadsheet_config", {}).get("orders_sheet", "Orders"),
            item_column="item",
            stock_column="stock",
            uploaded_data_summary=json.dumps(owner.get("uploaded_data_summary", []), indent=2),
        )
        return vertex_client.generate_json(prompt)

    # Fallback heuristic: "summary" tasks run on a Friday-morning schedule,
    # everything else reacts to incoming email.
    trigger_type = "schedule" if "summary" in request.task_name.lower() else "email_received"
    if trigger_type == "email_received":
        steps = _email_order_steps(request, owner)
    else:
        steps = _weekly_summary_steps(owner)
    workflow = WorkflowDefinition(
        name=request.task_name,
        description=request.selected_option.get("name", request.task_description),
        trigger={"type": trigger_type, "config": {"cron": "0 8 * * FRI"} if trigger_type == "schedule" else {}},
        steps=steps,
    )
    return workflow.model_dump()


def _email_order_steps(request: BuildWorkflowRequest, owner: dict) -> list[WorkflowStep]:
    """Template steps for the email-triggered order workflow: extract the
    order, read inventory, branch on ambiguity, log the order, then reply —
    or escalate to the owner when clarification is needed."""
    return [
        WorkflowStep(
            id="step_extract",
            action="ai_extract",
            params={
                "input": "{{trigger.email.body}}",
                "extract_fields": ["customer_name", "items", "pickup_date", "needs_clarification"],
                "output_var": "order_data",
            },
            on_error="notify_owner",
        ),
        WorkflowStep(
            id="step_read_inventory",
            action="sheets_read",
            params={
                "spreadsheet_id": "{{config.spreadsheet_id}}",
                "sheet_name": "Inventory",
                "output_var": "inventory_rows",
            },
            on_error="abort",
        ),
        WorkflowStep(
            id="step_check_stock",
            action="condition",
            params={
                "check": "not {{step_extract.order_data.needs_clarification}}",
                "if_true": "step_write_order",
                "if_false": "step_notify_ambiguity",
            },
            on_error="abort",
        ),
        WorkflowStep(
            id="step_write_order",
            action="sheets_write",
            params={
                "spreadsheet_id": "{{config.spreadsheet_id}}",
                "sheet_name": "Orders",
                "data": {
                    "customer": "{{step_extract.order_data.customer_name}}",
                    "items": "{{step_extract.order_data.items}}",
                    # Restaurant-related tasks get priority handling.
                    "priority": "high" if "restaurant" in request.task_name.lower() else "normal",
                },
            },
            on_error="notify_owner",
        ),
        WorkflowStep(
            id="step_compose_reply",
            action="ai_compose",
            params={
                "context": "{{step_extract.order_data}}",
                "tone": owner.get("preferred_tone", "friendly"),
                "output_var": "reply_body",
            },
            on_error="notify_owner",
        ),
        WorkflowStep(
            id="step_send_email",
            action="send_email",
            params={
                "to": "{{trigger.email.from}}",
                "subject": "Order confirmation",
                "body": "{{step_compose_reply.reply_body}}",
            },
            on_error="notify_owner",
        ),
        WorkflowStep(
            id="step_notify_ambiguity",
            action="notify_owner",
            params={
                "message": "This order needs clarification before FlowPilot can continue.",
                "severity": "warning",
                "options": ["Reply myself", "Ask customer for details"],
            },
            on_error="skip",
        ),
    ]


def _weekly_summary_steps(owner: dict) -> list[WorkflowStep]:
    """Template steps for the scheduled workflow: read the Orders sheet,
    summarize it with the model, and email the summary to the owner."""
    return [
        WorkflowStep(
            id="step_collect_orders",
            action="sheets_read",
            params={
                "spreadsheet_id": "{{config.spreadsheet_id}}",
                "sheet_name": "Orders",
                "output_var": "order_rows",
            },
            on_error="abort",
        ),
        WorkflowStep(
            id="step_compose_summary",
            action="ai_compose",
            params={
                "context": "{{step_collect_orders.order_rows}}",
                "tone": "helpful",
                "output_var": "summary_body",
            },
            on_error="notify_owner",
        ),
        WorkflowStep(
            id="step_send_summary",
            action="send_email",
            params={
                "to": owner.get("email", "owner@example.com"),
                "subject": "Weekly order summary",
                "body": "{{step_compose_summary.summary_body}}",
            },
            on_error="notify_owner",
        ),
    ]
backend/ai/workflow_suggester.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from backend.ai.client import render_prompt, vertex_client
2
+ from backend.ai.prompts import SUGGEST_WORKFLOWS_PROMPT
3
+
4
+
5
def suggest_workflow_options(
    task_name: str,
    task_description: str,
    category: str,
    spreadsheet_info: dict,
    uploaded_files: list[dict],
) -> dict:
    """Return three workflow options (full auto / balanced / minimal) for a task.

    Delegates to the Vertex AI model when it is configured; otherwise serves
    a deterministic template derived from the task name, plus the context the
    options were built from.
    """
    if vertex_client.is_ready():
        prompt = render_prompt(
            SUGGEST_WORKFLOWS_PROMPT,
            task_name=task_name,
            task_description=task_description,
            category=category,
            spreadsheet_info=spreadsheet_info,
            uploaded_files=uploaded_files,
        )
        return vertex_client.generate_json(prompt)

    # Static fallback: three hand-written option templates.
    full_auto = {
        "id": "A",
        "name": f"{task_name} Full Auto",
        "approach": "full_auto",
        "steps_summary": [
            "Trigger from a new input automatically.",
            "Use AI to extract or classify the content.",
            "Update Sheets and send the output without waiting.",
        ],
        "owner_involvement": "nothing unless FlowPilot raises an alert",
        "time_saved": "5-10 hours/week",
        "risk_note": "Incorrect extraction could act before review.",
    }
    review_first = {
        "id": "B",
        "name": f"{task_name} Review First",
        "approach": "balanced",
        "steps_summary": [
            "Trigger from inbox or schedule.",
            "AI prepares the work and checks current data.",
            "Owner approves before write-back or reply.",
        ],
        "owner_involvement": "one approval on important actions",
        "time_saved": "3-6 hours/week",
        "risk_note": "Still depends on owner response time.",
    }
    log_only = {
        "id": "C",
        "name": f"{task_name} Log Only",
        "approach": "minimal",
        "steps_summary": [
            "Trigger from the chosen source.",
            "Extract or summarize the information.",
            "Record it for the owner to finish manually.",
        ],
        "owner_involvement": "send final reply or finish task manually",
        "time_saved": "1-3 hours/week",
        "risk_note": "Most value still depends on manual follow-through.",
    }
    option_context = {
        "category": category,
        "spreadsheet_connected": spreadsheet_info.get("connected", False),
        "uploaded_files": [entry["filename"] for entry in uploaded_files],
        "task_description": task_description,
    }
    return {
        "options": [full_auto, review_first, log_only],
        "context": option_context,
    }
backend/api/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """API package."""
backend/api/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (197 Bytes). View file
 
backend/api/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (147 Bytes). View file
 
backend/api/__pycache__/middleware.cpython-312.pyc ADDED
Binary file (439 Bytes). View file
 
backend/api/__pycache__/routes.cpython-311.pyc ADDED
Binary file (7.74 kB). View file
 
backend/api/__pycache__/routes.cpython-312.pyc ADDED
Binary file (7.17 kB). View file
 
backend/api/middleware.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from fastapi import HTTPException
2
+
3
+
4
def ensure_owner(owner_id: str | None) -> None:
    """Reject the request with a 400 when no owner_id was supplied.

    Raises:
        HTTPException: 400 when owner_id is None or empty.
    """
    if owner_id:
        return
    raise HTTPException(status_code=400, detail="owner_id is required")
backend/api/routes.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, HTTPException
2
+
3
+ from backend.ai.analyzer import analyze_business_description, analyze_custom_task
4
+ from backend.ai.workflow_builder import build_workflow_definition
5
+ from backend.ai.workflow_suggester import suggest_workflow_options
6
+ from backend.engine.compiler import compile_workflow
7
+ from backend.engine.executor import WorkflowExecutor
8
+ from backend.integrations.file_parser import parse_uploaded_payload
9
+ from backend.models.schemas import (
10
+ AnalyzeRequest,
11
+ BuildWorkflowRequest,
12
+ BuildWorkflowResponse,
13
+ CustomTaskRequest,
14
+ DeployRequest,
15
+ DeploymentResponse,
16
+ EscalationReplyRequest,
17
+ EscalationResponse,
18
+ FileUploadRequest,
19
+ FileUploadResponse,
20
+ OwnerStatusResponse,
21
+ WorkflowSuggestionRequest,
22
+ )
23
+ from backend.storage.database import db
24
+
25
# Single router instance; the FastAPI app mounts it under the API prefix.
router = APIRouter()
# Shared executor used by /simulate-run; assumed stateless between runs —
# TODO confirm WorkflowExecutor keeps no per-request state.
executor = WorkflowExecutor()
27
+
28
+
29
@router.post("/analyze")
def analyze(request: AnalyzeRequest) -> dict:
    """Analyze the business description and persist it on the owner record."""
    analysis = analyze_business_description(request.description)
    owner_record = db.ensure_owner(request.owner_id, request.owner_email)
    owner_record["business_description"] = request.description
    owner_record["business_analysis"] = analysis
    db.save_owner(owner_record)
    return analysis
37
+
38
+
39
@router.post("/custom-task")
def custom_task(request: CustomTaskRequest) -> dict:
    """Classify an owner-described custom task (feasibility and category).

    Raises:
        HTTPException: 404 when the owner does not exist.
    """
    owner = db.get_owner(request.owner_id)
    if not owner:
        # Mirror /deploy: an unknown owner is a client error, not a 500 from
        # calling .get on None.
        raise HTTPException(status_code=404, detail="owner not found")
    return analyze_custom_task(
        business_description=owner.get("business_description", ""),
        existing_workflows=db.list_workflows(request.owner_id),
        custom_task=request.custom_task,
    )
47
+
48
+
49
@router.post("/suggest-workflows")
def suggest_workflows(request: WorkflowSuggestionRequest) -> dict:
    """Return candidate workflow options for the given task.

    Raises:
        HTTPException: 404 when the owner does not exist.
    """
    owner = db.get_owner(request.owner_id)
    if not owner:
        # Mirror /deploy: an unknown owner is a client error, not a 500 from
        # calling .get on None.
        raise HTTPException(status_code=404, detail="owner not found")
    return suggest_workflow_options(
        task_name=request.task_name,
        task_description=request.task_description,
        category=request.category,
        spreadsheet_info=owner.get("spreadsheet_config", {"connected": False}),
        uploaded_files=db.list_data_files(request.owner_id),
    )
59
+
60
+
61
@router.post("/build-workflow", response_model=BuildWorkflowResponse)
def build_workflow(request: BuildWorkflowRequest) -> BuildWorkflowResponse:
    """Build and compile a workflow for the selected option.

    Raises:
        HTTPException: 404 when the owner does not exist.
    """
    owner = db.get_owner(request.owner_id)
    if not owner:
        # Mirror /deploy: an unknown owner is a client error, not a 500 from
        # build_workflow_definition dereferencing None.
        raise HTTPException(status_code=404, detail="owner not found")
    workflow_json = build_workflow_definition(request=request, owner=owner)
    compiled = compile_workflow(workflow_json)
    return BuildWorkflowResponse(workflow=compiled)
67
+
68
+
69
@router.post("/deploy", response_model=DeploymentResponse)
def deploy_workflow(request: DeployRequest) -> DeploymentResponse:
    """Compile and persist each requested workflow, then mark the owner live.

    Raises:
        HTTPException: 404 when the owner does not exist.
    """
    owner = db.get_owner(request.owner_id)
    if not owner:
        raise HTTPException(status_code=404, detail="owner not found")
    deployments = [
        db.save_workflow(request.owner_id, compile_workflow(workflow.model_dump()))
        for workflow in request.workflows
    ]
    owner["state"] = "live"
    db.save_owner(owner)
    return DeploymentResponse(
        status="deployed",
        workflows=deployments,
        message="All workflows are live.",
    )
86
+
87
+
88
@router.post("/upload-data", response_model=FileUploadResponse)
def upload_data(request: FileUploadRequest) -> FileUploadResponse:
    """Parse an uploaded data payload and store the resulting file record."""
    parsed_payload = parse_uploaded_payload(request.filename, request.content, request.purpose)
    stored_record = db.save_data_file(
        request.owner_id,
        request.filename,
        request.file_type,
        request.purpose,
        parsed_payload,
    )
    return FileUploadResponse(file=stored_record)
93
+
94
+
95
@router.get("/status", response_model=OwnerStatusResponse)
def status(owner_id: str) -> OwnerStatusResponse:
    """Return the owner record together with workflows and recent executions."""
    owner_record = db.get_owner(owner_id)
    workflows = db.list_workflows(owner_id)
    executions = db.list_execution_logs(owner_id)
    return OwnerStatusResponse(
        owner=owner_record,
        workflows=workflows,
        recent_executions=executions,
    )
103
+
104
+
105
@router.get("/escalations")
def escalations(owner_id: str) -> dict:
    """List the owner's escalations."""
    items = db.list_escalations(owner_id)
    return {"items": items}
108
+
109
+
110
@router.post("/escalation-reply", response_model=EscalationResponse)
def escalation_reply(request: EscalationReplyRequest) -> EscalationResponse:
    """Record the owner's response to an escalation and mark it resolved."""
    resolved = db.resolve_escalation(request.escalation_id, request.response)
    return EscalationResponse(escalation=resolved)
114
+
115
+
116
@router.post("/simulate-run")
def simulate_run(owner_id: str, workflow_id: str, trigger: dict) -> dict:
    """Execute a stored workflow against a synthetic trigger and log the run.

    Raises:
        HTTPException: 404 when no workflow matches (owner_id, workflow_id).
    """
    workflow = db.get_workflow(owner_id, workflow_id)
    if not workflow:
        # Without this guard a bad id surfaces as a 500 inside the executor.
        raise HTTPException(status_code=404, detail="workflow not found")
    result = executor.execute(workflow, trigger, db=db, owner_id=owner_id)
    db.save_execution_log(
        owner_id,
        workflow_id,
        trigger,
        result["steps"],
        result["outcome"],
        result.get("error"),
    )
    return result
backend/config.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from functools import lru_cache
3
+
4
+ from pydantic import BaseModel, Field
5
+ from dotenv import load_dotenv
6
+
7
+ load_dotenv()
8
+
9
+
10
class Settings(BaseModel):
    """Application configuration, resolved from the environment.

    NOTE(review): os.getenv defaults here are captured once, when this class
    body is evaluated at import time (after load_dotenv above), not per
    Settings() instantiation — confirm that is intended.
    """

    # App identity and routing.
    app_name: str = "FlowPilot"
    api_prefix: str = "/api"
    # Database URL; defaults to a local SQLite file.
    database_url: str = "sqlite:///./flowpilot.db"
    # CORS origins from comma-separated ALLOW_ORIGINS ("*" allows all).
    allow_origins: list[str] = Field(default_factory=lambda: _split_csv(os.getenv("ALLOW_ORIGINS", "*")))
    ai_provider: str = os.getenv("AI_PROVIDER", "vertex_ai")
    # Gmail polling interval, in seconds.
    gmail_poll_seconds: int = int(os.getenv("GMAIL_POLL_SECONDS", "30"))
    # Vertex AI settings (project, region, model).
    vertex_project_id: str = os.getenv("VERTEX_PROJECT_ID", "your-gcp-project-id")
    vertex_location: str = os.getenv("VERTEX_LOCATION", "us-central1")
    vertex_model: str = os.getenv("VERTEX_MODEL", "gemini-2.5-pro")
    # Path to a service-account JSON file; empty string means use ambient
    # credentials (presumably ADC — TODO confirm against the client setup).
    google_application_credentials: str = os.getenv(
        "GOOGLE_APPLICATION_CREDENTIALS",
        "",
    )
24
+
25
+
26
def _split_csv(value: str) -> list[str]:
    """Split a comma-separated string into trimmed, non-empty entries."""
    trimmed = (piece.strip() for piece in value.split(","))
    return [piece for piece in trimmed if piece]
28
+
29
+
30
@lru_cache
def get_settings() -> Settings:
    """Return the process-wide Settings instance (cached after first call)."""
    return Settings()
backend/engine/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """Workflow engine exports."""
2
+
3
+ from backend.engine.compiler import compile_workflow
4
+
5
+ __all__ = ["compile_workflow"]