Umer797's picture
Update llm_node.py
485fdc3 verified
raw
history blame
855 Bytes
from functools import lru_cache

from langchain_community.llms import HuggingFaceHub
def llm_node(question, search_result):
    """Answer a GAIA benchmark question with a Hugging Face hosted LLM.

    Combines the retrieved search context and the question into a strict
    "answer only" prompt, then invokes the model.

    Args:
        question: The GAIA benchmark question text.
        search_result: Retrieved supporting information (e.g. search output).

    Returns:
        The model's response string with surrounding whitespace stripped.
    """
    llm = _get_llm()
    # Build prompt combining search + question.
    prompt = f"""You are solving a GAIA benchmark evaluation question.
Here’s the question:
{question}
Here’s retrieved information:
{search_result}
⚠️ VERY IMPORTANT:
- ONLY return the final answer, exactly as required.
- Do NOT include explanations, prefixes, or notes.
- If the question asks for a list, give only the list, in the requested format.
Your answer:"""
    response = llm.invoke(prompt)
    return response.strip()


@lru_cache(maxsize=1)
def _get_llm():
    """Create the Hugging Face client once and reuse it on later calls.

    The original code rebuilt the client on every invocation; caching the
    instance avoids that repeated setup cost without changing behavior.

    NOTE(review): langchain_community's HuggingFaceHub is deprecated upstream
    in favor of HuggingFaceEndpoint — consider migrating.
    """
    return HuggingFaceHub(
        repo_id="HuggingFaceH4/zephyr-7b-beta",  # You can swap with mistral or mixtral if needed
        model_kwargs={
            "temperature": 0.1,  # near-deterministic output for benchmark scoring
            "max_new_tokens": 500,
        },
    )