import os
from typing import Optional
import google.generativeai as genai
from google.generativeai.types import GenerationConfig
class LLMClient:
    """Thin wrapper around a configurable LLM backend (currently Gemini only).

    The provider and model are chosen via the LLM_PROVIDER and LLM_MODEL
    environment variables; the Gemini path additionally requires
    GEMINI_API_KEY to be set.
    """

    def __init__(self):
        # Resolve provider/model from the environment, defaulting to Gemini.
        self.provider = os.getenv("LLM_PROVIDER", "gemini").lower()
        self.model_name = os.getenv("LLM_MODEL", "gemini-3.1-pro-preview")

        # Guard clause: anything other than Gemini is unsupported for now.
        if self.provider != "gemini":
            raise NotImplementedError("Only Gemini provider is wired for now.")

        # Defensive check: genai may be None if the import was soft-failed
        # elsewhere in the module — TODO confirm against the import block.
        if genai is None:
            raise ImportError("google-generativeai not installed.")
        api_key = os.getenv("GEMINI_API_KEY")
        if not api_key:
            raise ValueError("GEMINI_API_KEY not set.")
        genai.configure(api_key=api_key)
        self.model = genai.GenerativeModel(self.model_name)

    def call(self, prompt: str, schema: Optional[dict] = None) -> str:
        """Send *prompt* to the model and return the response text.

        When *schema* (a plain JSON-schema dict) is provided and the provider
        is Gemini, it is forwarded as ``response_schema`` with a JSON mime
        type so the model emits structured JSON.

        Raises:
            RuntimeError: the response carried no text.
            NotImplementedError: the provider is not Gemini.
        """
        # Guard clause: only the Gemini provider is implemented.
        if self.provider != "gemini":
            raise NotImplementedError("Schema-based calls only wired for Gemini right now.")

        if schema and GenerationConfig is not None:
            gen_config = GenerationConfig(
                response_schema=schema,
                response_mime_type="application/json",
            )
            resp = self.model.generate_content(
                prompt,
                generation_config=gen_config,
            )
        else:
            resp = self.model.generate_content(prompt)

        body = getattr(resp, "text", "")
        if not body:
            raise RuntimeError("LLM response did not contain text.")
        return body