Web-Rag / llm.py
TharaKavin's picture
Upload 6 files
4a8b2d1 verified
raw
history blame contribute delete
534 Bytes
from groq import Groq
import os
from dotenv import load_dotenv
# Pull GROQ_API_KEY (and any other settings) from a local .env file into os.environ.
load_dotenv()
# Module-level Groq client shared by all calls; os.getenv returns None if the
# key is missing — the SDK will then fail at request time, not here.
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
def generate_answer(context, question, model="openai/gpt-oss-20b"):
    """Answer *question* strictly from *context* via the Groq chat API.

    Args:
        context: Retrieved passage(s) the model must confine its answer to.
        question: The user's question.
        model: Groq model identifier; default preserves the original
            hard-coded choice, but callers may now override it.

    Returns:
        The model's reply text (``str``); may be ``None`` if the API
        returns an empty message content.
    """
    # Prompt instructs the model to rely only on the supplied context
    # (reduces hallucination in the RAG pipeline).
    prompt = f"""
You are an AI assistant. Answer ONLY from the given context.
Context:
{context}
Question:
{question}
"""
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3,  # low temperature keeps answers close to the context
    )
    # Single (non-streamed) completion: first choice holds the answer.
    return response.choices[0].message.content