# Source: Hugging Face file viewer (scrape residue preserved as a comment)
# FD900's picture
# Update agent.py
# 3481b18 verified
# raw | history blame | 2.17 kB
from transformers import pipeline
from duckduckgo_search import DDGS
import os
class BasicAgent:
    """Minimal question-answering agent: one web search for context, one LLM call.

    The LLM is prompted to end its reply with ``FINAL ANSWER: ...``; the
    agent extracts and normalizes that final answer (lowercase, trailing
    period stripped) or returns ``"unknown"`` when the template is absent
    or the model call fails.
    """

    def __init__(self):
        model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
        print(f"Loading model: {model_id}")
        # NOTE: generation parameters (max_new_tokens, temperature) are NOT
        # model_kwargs — passing them there sends them to from_pretrained,
        # where max_new_tokens is invalid and temperature is silently ignored.
        # They are supplied per-call in __call__ instead.
        self.llm = pipeline(
            "text-generation",
            model=model_id,
            token=os.getenv("HF_TOKEN"),  # gated model: requires HF auth token
        )

    def search(self, query: str) -> str:
        """Return the body of the top DuckDuckGo result, or "" on failure.

        Best-effort: any search error is logged and swallowed so the agent
        can still answer from the model's own knowledge.
        """
        try:
            with DDGS() as ddgs:
                results = list(ddgs.text(query, max_results=1))
                if results:
                    return results[0]["body"]
        except Exception as e:
            print(f"Search failed: {e}")
        return ""

    def __call__(self, question: str) -> str:
        """Answer *question*: search for context, prompt the LLM, parse the answer.

        Returns the normalized final answer, or "unknown" if the model did
        not produce the ``FINAL ANSWER:`` template or the call raised.
        """
        context = self.search(question)
        system_prompt = (
            "You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: "
            "FINAL ANSWER: [YOUR FINAL ANSWER]. "
            "YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. "
            "If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. "
            "If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. "
            "If you are asked for a comma separated list, apply the above rules depending on whether the element to be put in the list is a number or a string."
        )
        prompt = f"""{system_prompt}
Context: {context}
Question: {question}
Answer:"""
        try:
            # return_full_text=False is essential: with the default (True) the
            # output echoes the prompt, which itself contains "FINAL ANSWER:",
            # so the template check below would always match spuriously.
            # do_sample=True makes temperature=0.2 actually take effect.
            result = self.llm(
                prompt,
                max_new_tokens=200,
                do_sample=True,
                temperature=0.2,
                return_full_text=False,
            )[0]["generated_text"]
            if "FINAL ANSWER:" in result:
                # Take text after the last marker, first line only.
                answer = result.split("FINAL ANSWER:")[-1].strip().split("\n")[0]
                return answer.lower().strip(" .")
            else:
                return "unknown"
        except Exception as e:
            print(f"LLM error: {e}")
            return "unknown"