from langchain_community.llms import HuggingFaceHub
def llm_node(question, repo_id="HuggingFaceH4/zephyr-7b-beta", *,
             temperature=0.1, max_new_tokens=500):
    """Answer a GAIA benchmark question with a Hugging Face hosted LLM.

    Parameters
    ----------
    question : str
        The benchmark question to answer.
    repo_id : str
        Hub model repository to query. Defaults to Zephyr-7B-beta; another
        instruct model (e.g. "mistralai/Mistral-7B-Instruct-v0.2") works too.
    temperature : float
        Sampling temperature; the low default keeps responses deterministic.
    max_new_tokens : int
        Upper bound on generated tokens, sized for longer list answers.

    Returns
    -------
    str
        The model's raw answer with surrounding whitespace stripped.
    """
    # NOTE(review): HuggingFaceHub is deprecated in recent langchain
    # releases — consider migrating to
    # langchain_huggingface.HuggingFaceEndpoint when dependencies allow.
    llm = HuggingFaceHub(
        repo_id=repo_id,
        model_kwargs={
            "temperature": temperature,
            "max_new_tokens": max_new_tokens,
        },
    )

    # The prompt insists on a bare, exactly formatted answer because GAIA
    # scoring is exact-match: any preamble or explanation fails the check.
    prompt = f"""You are solving a GAIA benchmark evaluation question.
⚠️ VERY IMPORTANT:
- ONLY return the final answer, exactly as required.
- DO NOT include explanations, prefixes, or notes.
- Format the answer exactly as asked (e.g., comma-separated, plural, in requested order).
- If the question asks for a list, give only the list, no intro.
Here’s the question:
{question}
Your direct answer:"""

    response = llm.invoke(prompt)
    # Trim stray whitespace/newlines the model may emit around the answer.
    return response.strip()