mohantest committed on
Commit
4de1201
·
verified ·
1 Parent(s): 5616b08

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -14
app.py CHANGED
@@ -4,13 +4,11 @@ import json
4
  import os
5
  from smolagents import CodeAgent, tool
6
  from huggingface_hub import InferenceClient
7
- from agent import solve
8
 
9
- # Set up logging
10
  logging.basicConfig(level=logging.INFO)
11
  logger = logging.getLogger(__name__)
12
 
13
- # Answer cache (speeds up repeated evaluation)
14
  CACHE_FILE = "answer_cache.json"
15
  if os.path.exists(CACHE_FILE):
16
  with open(CACHE_FILE) as f:
@@ -70,17 +68,13 @@ def web_search(query: str) -> str:
70
  except Exception as e:
71
  return f"Search error: {e}"
72
 
73
- # ---------- Custom model that wraps HF InferenceClient ----------
74
  class CustomHFModel:
75
  def __init__(self, model_id="HuggingFaceH4/zephyr-7b-beta"):
76
  self.client = InferenceClient(model=model_id, token=os.getenv("HF_TOKEN"))
77
  self.model_id = model_id
78
 
79
  def __call__(self, messages, **kwargs):
80
- """
81
- Expected by smolagents: takes a list of messages (e.g., [{"role": "user", "content": "..."}])
82
- and returns the assistant's reply as a string.
83
- """
84
  response = self.client.chat_completion(
85
  messages=messages,
86
  max_tokens=500,
@@ -98,12 +92,12 @@ try:
98
  except ImportError:
99
  logger.warning("duckduckgo-search not installed, web_search disabled.")
100
 
101
- model = CustomHFModel() # you can change the model_id if desired
102
  agent = CodeAgent(tools=tools, model=model)
103
 
104
- # ---------- Main entry point ----------
105
- def answer_question(question: str) -> str:
106
- """Called by the evaluator for each question."""
107
  q_hash = hashlib.md5(question.encode()).hexdigest()
108
  if q_hash in answer_cache:
109
  logger.info(f"Cache hit for question: {question[:50]}...")
@@ -118,5 +112,4 @@ def answer_question(question: str) -> str:
118
 
119
  answer_cache[q_hash] = answer
120
  save_cache()
121
- return answer
122
-
 
4
  import os
5
  from smolagents import CodeAgent, tool
6
  from huggingface_hub import InferenceClient
 
7
 
 
8
  logging.basicConfig(level=logging.INFO)
9
  logger = logging.getLogger(__name__)
10
 
11
+ # Cache for answers
12
  CACHE_FILE = "answer_cache.json"
13
  if os.path.exists(CACHE_FILE):
14
  with open(CACHE_FILE) as f:
 
68
  except Exception as e:
69
  return f"Search error: {e}"
70
 
71
+ # ---------- Custom model ----------
72
  class CustomHFModel:
73
  def __init__(self, model_id="HuggingFaceH4/zephyr-7b-beta"):
74
  self.client = InferenceClient(model=model_id, token=os.getenv("HF_TOKEN"))
75
  self.model_id = model_id
76
 
77
  def __call__(self, messages, **kwargs):
 
 
 
 
78
  response = self.client.chat_completion(
79
  messages=messages,
80
  max_tokens=500,
 
92
  except ImportError:
93
  logger.warning("duckduckgo-search not installed, web_search disabled.")
94
 
95
+ model = CustomHFModel()
96
  agent = CodeAgent(tools=tools, model=model)
97
 
98
+ # ---------- Main entry point (called by app.py) ----------
99
+ def solve(question: str) -> str:
100
+ """This function must be named 'solve' because app.py imports it."""
101
  q_hash = hashlib.md5(question.encode()).hexdigest()
102
  if q_hash in answer_cache:
103
  logger.info(f"Cache hit for question: {question[:50]}...")
 
112
 
113
  answer_cache[q_hash] = answer
114
  save_cache()
115
+ return answer