muthuk1 committed on
Commit
34f7aa0
·
verified ·
1 Parent(s): 125fe69

Add cost_estimate and explore_graph skill scripts

Browse files
openclaw/skills/cost_estimate/cost_estimate.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ OpenClaw Skill: cost_estimate
4
+ Estimate costs across all 12 LLM providers.
5
+ """
6
import json
import sys
from typing import Optional
8
+
9
# Per-provider pricing table. "input"/"output" are USD per 1K tokens
# (see the /1000 scaling in cost_estimate below).
# NOTE(review): prices are a point-in-time snapshot — verify before relying on them.
PROVIDERS = {
    "openai": {"name": "OpenAI GPT-4o-mini", "input": 0.00015, "output": 0.0006},
    "anthropic": {"name": "Claude Sonnet 4", "input": 0.003, "output": 0.015},
    "gemini": {"name": "Gemini 2.0 Flash", "input": 0.0001, "output": 0.0004},
    "mistral": {"name": "Mistral Large", "input": 0.002, "output": 0.006},
    "cohere": {"name": "Command R+", "input": 0.0025, "output": 0.01},
    "ollama": {"name": "Llama 3.2 (Local)", "input": 0, "output": 0},
    "openrouter": {"name": "Llama 3.3 70B", "input": 0.0004, "output": 0.0004},
    "groq": {"name": "Llama 3.3 70B (LPU)", "input": 0.00059, "output": 0.00079},
    "xai": {"name": "Grok 3 Mini", "input": 0.0003, "output": 0.0005},
    "together": {"name": "Llama 3.1 70B Turbo", "input": 0.00088, "output": 0.00088},
    "huggingface": {"name": "Llama 3.3 70B (HF)", "input": 0, "output": 0},
    "deepseek": {"name": "DeepSeek V3", "input": 0.00014, "output": 0.00028},
}

# Assumed token budgets for a single query under each strategy.
BASELINE_INPUT_TOKENS = 800
BASELINE_OUTPUT_TOKENS = 150
GRAPHRAG_INPUT_TOKENS = 2200
GRAPHRAG_OUTPUT_TOKENS = 200


def cost_estimate(num_queries: int, provider: Optional[str] = None) -> dict:
    """Estimate baseline vs. GraphRAG query costs across LLM providers.

    Args:
        num_queries: Number of queries to project totals for.
        provider: Optional provider id (a PROVIDERS key) to restrict the
            estimate to a single provider. An unknown or missing id falls
            back to estimating all providers.

    Returns:
        dict with "num_queries", "providers" (per-provider cost breakdowns,
        sorted cheapest-first by GraphRAG total), and the "cheapest" /
        "most_expensive" provider ids (None if no providers).
    """
    # Unknown provider ids deliberately fall back to the full table
    # rather than raising — best-effort CLI behavior.
    providers = {provider: PROVIDERS[provider]} if provider and provider in PROVIDERS else PROVIDERS
    results = []

    for pid, p in providers.items():
        # Prices are per 1K tokens, hence the /1000 scaling.
        baseline_cost = (BASELINE_INPUT_TOKENS / 1000 * p["input"] +
                         BASELINE_OUTPUT_TOKENS / 1000 * p["output"])
        graphrag_cost = (GRAPHRAG_INPUT_TOKENS / 1000 * p["input"] +
                         GRAPHRAG_OUTPUT_TOKENS / 1000 * p["output"])
        results.append({
            "provider": pid,
            "name": p["name"],
            "baseline_cost_per_query": round(baseline_cost, 8),
            "graphrag_cost_per_query": round(graphrag_cost, 8),
            "baseline_total": round(baseline_cost * num_queries, 4),
            "graphrag_total": round(graphrag_cost * num_queries, 4),
            # Projections assuming 1,000 queries per day.
            "monthly_1k_qpd": round(graphrag_cost * 1000 * 30, 2),
            "annual_1k_qpd": round(graphrag_cost * 1000 * 365, 2),
            "is_free": p["input"] == 0 and p["output"] == 0,
        })

    # Stable sort: free providers keep their PROVIDERS insertion order.
    results.sort(key=lambda x: x["graphrag_total"])

    return {
        "num_queries": num_queries,
        "providers": results,
        # Guard both ends consistently (originally only most_expensive was guarded).
        "cheapest": results[0]["provider"] if results else None,
        "most_expensive": results[-1]["provider"] if results else None,
    }
59
+
60
+
61
if __name__ == "__main__":
    # CLI: cost_estimate.py [num_queries] [provider_id]
    args = sys.argv[1:]
    num_queries = int(args[0]) if args else 10000
    provider_id = args[1] if len(args) > 1 else None
    print(json.dumps(cost_estimate(num_queries, provider_id), indent=2))