adeshboudh16 commited on
Commit
da0480b
·
1 Parent(s): 143e1ed

topology fix

Browse files
.gitignore CHANGED
@@ -6,6 +6,7 @@ dist/
6
  wheels/
7
  *.egg-info
8
  .cache/
 
9
 
10
  # Virtual environments
11
  .venv
 
6
  wheels/
7
  *.egg-info
8
  .cache/
9
+ .vscode/
10
 
11
  # Virtual environments
12
  .venv
frontend/src/app/layout.tsx CHANGED
@@ -31,7 +31,10 @@ export default function RootLayout({
31
  }: Readonly<{ children: React.ReactNode }>) {
32
  return (
33
  <html lang="en" suppressHydrationWarning>
34
- <body className={`${inter.variable} ${instrumentSerif.variable}`} suppressHydrationWarning>
 
 
 
35
  <ThemeProvider
36
  attribute="class"
37
  defaultTheme="dark"
 
31
  }: Readonly<{ children: React.ReactNode }>) {
32
  return (
33
  <html lang="en" suppressHydrationWarning>
34
+ <body
35
+ className={`${inter.variable} ${instrumentSerif.variable}`}
36
+ suppressHydrationWarning
37
+ >
38
  <ThemeProvider
39
  attribute="class"
40
  defaultTheme="dark"
run_server.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import sys
3
+ import uvicorn
4
+
5
+ if __name__ == "__main__":
6
+ if sys.platform == "win32":
7
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
8
+
9
+ uvicorn.run("src.civicsetu.api.main:app", host="0.0.0.0", port=8000, reload=True)
src/civicsetu/agent/nodes.py CHANGED
@@ -110,7 +110,7 @@ def _llm_call(prompt: str, system: str, temperature: float = 0.0, tier: str = "t
110
  "max_tokens": 16384,
111
  }
112
  # ── NVIDIA-hosted models (GLM4.7, Minimax) ───────────────
113
- _nvidia_models = ("z-ai/glm4.7", "minimaxai/minimax-m2.7", "deepseek-ai/deepseek-v4-pro", "deepseek-ai/deepseek-v4-flash")
114
  if any(nm in model for nm in _nvidia_models):
115
  completion_kwargs["api_base"] = "https://integrate.api.nvidia.com/v1"
116
  completion_kwargs["api_key"] = os.getenv("NVIDIA_API_KEY")
 
110
  "max_tokens": 16384,
111
  }
112
  # ── NVIDIA-hosted models (GLM4.7, Minimax) ───────────────
113
+ _nvidia_models = ("z-ai/glm4.7", "minimaxai/minimax-m2.7", "moonshotai/kimi-k2-thinking", "moonshotai/kimi-k2-instruct")
114
  if any(nm in model for nm in _nvidia_models):
115
  completion_kwargs["api_base"] = "https://integrate.api.nvidia.com/v1"
116
  completion_kwargs["api_key"] = os.getenv("NVIDIA_API_KEY")
src/civicsetu/api/main.py CHANGED
@@ -1,9 +1,13 @@
1
  from __future__ import annotations
2
 
3
  import asyncio
 
4
  import time
5
  from contextlib import asynccontextmanager
6
 
 
 
 
7
  import structlog
8
  from fastapi import FastAPI
9
  from fastapi.middleware.cors import CORSMiddleware
@@ -39,6 +43,9 @@ def create_checkpointer():
39
  @asynccontextmanager
40
  async def lifespan(app: FastAPI):
41
  """Startup and shutdown events."""
 
 
 
42
  log.info("civicsetu_starting", env=settings.api_env)
43
 
44
  # Determine and log the primary model's masked API key
 
1
  from __future__ import annotations
2
 
3
  import asyncio
4
+ import sys
5
  import time
6
  from contextlib import asynccontextmanager
7
 
8
+ if sys.platform == "win32":
9
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
10
+
11
  import structlog
12
  from fastapi import FastAPI
13
  from fastapi.middleware.cors import CORSMiddleware
 
43
  @asynccontextmanager
44
  async def lifespan(app: FastAPI):
45
  """Startup and shutdown events."""
46
+ if sys.platform == "win32":
47
+ import asyncio
48
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
49
  log.info("civicsetu_starting", env=settings.api_env)
50
 
51
  # Determine and log the primary model's masked API key