import os
import json
from pathlib import Path

import gradio as gr
import requests
import pandas as pd
from langchain_core.messages import HumanMessage

from load_data import (
    ensure_validation_data,
    get_file_from_gaia_level1_data,
    get_question,
)
from graph import react_graph
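# load_data and graph are local helper modules shipped alongside this app in
# the Space's repository (not PyPI packages).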

# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        fixed_answer = "This is a default answer."
        print(f"Agent returning fixed answer: {fixed_answer}")
        return fixed_answer
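
# Note: BasicAgent is the course template's placeholder and is not used in the
# run path below; run_and_submit_all answers questions via _invoke_react_graph.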

def _invoke_react_graph(task_id: str) -> str:
    """
    Invokes the react graph with the given task_id and returns the final answer.
    """
    input_file = get_file_from_gaia_level1_data(task_id)
    question = get_question(task_id)
    print(
        f"Invoking react graph for task_id={task_id} with question: {question[:50]}... and input_file: {input_file}"
    )
    messages = [HumanMessage(content=question)]
    messages = react_graph.invoke(
        {"messages": messages, "input_file": input_file},
        config={"recursion_limit": 100},
    )
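    # invoke() returns the final graph state (a dict); the last entry of its
    # "messages" list is the agent's reply.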
    final_message = messages["messages"][-1]
    print(f"Final message from react graph: {final_message.content[:100]}...")
    # Extract the final answer from the message content
    final_answer_prefix = "FINAL ANSWER:"
    if final_answer_prefix in final_message.content:
        final_answer = final_message.content.split(final_answer_prefix)[-1].strip()
        print(f"Extracted final answer: {final_answer}")
        return final_answer
    else:
        print(
            "Warning: 'FINAL ANSWER:' prefix not found in react graph output. "
            "Returning full message content as answer."
        )
        return final_message.content.strip()
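
# Example of the FINAL ANSWER parsing above (hypothetical message contents):
#   "The capital of France is Paris.\nFINAL ANSWER: Paris"  ->  "Paris"
#   "I could not determine an answer."                      ->  full content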

def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the agent (via _invoke_react_graph) on them,
    submits all answers, and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please log in to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (modify this part to create your agent)
    # In the case of an app running as a Hugging Face Space, this link points
    # toward your codebase (useful for others, so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Load Questions (cache first, API fallback)
    cache_path = Path(__file__).resolve().parent / "cached_questions.json"
    questions_data = None
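    # Expected cache schema (mirrors the /questions API response): a JSON list
    # of objects, each with at least "task_id" and "question" keys.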
    # 2.a Try cache first
    if cache_path.exists():
        try:
            with open(cache_path, "r", encoding="utf-8") as f:
                cached = json.load(f)
            if isinstance(cached, list) and cached:
                questions_data = cached
                print(
                    f"Loaded {len(questions_data)} questions from cache: {cache_path}"
                )
            else:
                print(f"Cache file found but empty/invalid format: {cache_path}")
        except json.JSONDecodeError as e:
            print(f"Cache JSON is invalid ({cache_path}): {e}. Falling back to API.")
        except OSError as e:
            print(
                f"Could not read cache file ({cache_path}): {e}. Falling back to API."
            )

    # 2.b Fetch from API only if cache missing/invalid/empty
    if questions_data is None:
        print(f"Fetching questions from: {questions_url}")
        try:
            response = requests.get(questions_url, timeout=15)
            response.raise_for_status()
            questions_data = response.json()
            if not isinstance(questions_data, list) or not questions_data:
                print("Fetched questions list is empty or invalid format.")
                return "Fetched questions list is empty or invalid format.", None
            print(f"Fetched {len(questions_data)} questions from API.")
            # Save cache for next runs
            try:
                with open(cache_path, "w", encoding="utf-8") as f:
                    json.dump(questions_data, f, ensure_ascii=False, indent=2)
                print(f"Questions cached to: {cache_path}")
            except OSError as e:
                print(f"Warning: unable to write cache file ({cache_path}): {e}")
        except requests.exceptions.JSONDecodeError as e:
            # Must come before RequestException: requests' JSONDecodeError is a
            # RequestException subclass, so the broader handler would otherwise
            # swallow it.
            print(f"Error decoding JSON response from questions endpoint: {e}")
            print(f"Response text: {response.text[:500]}")
            return f"Error decoding server response for questions: {e}", None
        except requests.exceptions.RequestException as e:
            print(f"Error fetching questions: {e}")
            return f"Error fetching questions: {e}", None
        except Exception as e:
            print(f"An unexpected error occurred fetching questions: {e}")
            return f"An unexpected error occurred fetching questions: {e}", None

    # 2.c Retrieve the data files provided for the test (in the case of the
    # test on Hugging Face, the files are in data/2023_level1/validation/)
    base_dir = Path(__file__).resolve().parent
    ok, error_message = ensure_validation_data(base_dir)
    if not ok:
        return error_message, None
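    # Note: ensure_validation_data (defined in load_data.py) presumably
    # downloads or verifies the GAIA level-1 validation files under base_dir;
    # it returns an (ok, error_message) tuple.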

    # 3. Run your Agent (answers cache by task_id)
    results_log = []
    answers_payload = []
    answers_cache_path = Path(__file__).resolve().parent / "cached_answers.json"
    answers_cache = {}
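    # cached_answers.json is a JSON object mapping task_id (str) to the final
    # answer string, so repeated runs skip tasks that were already answered.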
    # 3.a Load answers cache
    if answers_cache_path.exists():
        try:
            with open(answers_cache_path, "r", encoding="utf-8") as f:
                loaded_cache = json.load(f)
            if isinstance(loaded_cache, dict):
                answers_cache = loaded_cache
                print(
                    f"Loaded {len(answers_cache)} cached answers from: {answers_cache_path}"
                )
            else:
                print(
                    f"Answers cache has invalid format (expected object): {answers_cache_path}"
                )
        except json.JSONDecodeError as e:
            print(
                f"Answers cache JSON is invalid ({answers_cache_path}): {e}. Starting with empty cache."
            )
        except OSError as e:
            print(
                f"Could not read answers cache ({answers_cache_path}): {e}. Starting with empty cache."
            )

    cache_updated = False
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        # task_id = "a1e91b78-d3d8-4675-bb8d-62741b4b68a6"  # TEMPORARY HARDCODED TASK_ID FOR TESTING
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        task_key = str(task_id)
        # Use cached answer if available
        if task_key in answers_cache:
            submitted_answer = answers_cache[task_key]
            print(f"Using cached answer for task_id={task_id}")
        else:
            try:
                submitted_answer = _invoke_react_graph(task_key)
                answers_cache[task_key] = submitted_answer
                cache_updated = True
                print(f"Computed and cached answer for task_id={task_id}")
            except Exception as e:
                print(f"Error running agent on task {task_id}: {e}")
                results_log.append(
                    {
                        "Task ID": task_id,
                        "Question": question_text,
                        "Submitted Answer": f"AGENT ERROR: {e}",
                    }
                )
                continue
        answers_payload.append(
            {"task_id": task_id, "submitted_answer": submitted_answer}
        )
        results_log.append(
            {
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": submitted_answer,
            }
        )
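    # Note: tasks that raise are logged with an "AGENT ERROR" marker but are
    # neither cached nor submitted, so they are retried on the next run.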

    # 3.b Save answers cache only if updated
    if cache_updated:
        try:
            with open(answers_cache_path, "w", encoding="utf-8") as f:
                json.dump(answers_cache, f, ensure_ascii=False, indent=2)
            print(f"Answers cache updated: {answers_cache_path}")
        except OSError as e:
            print(f"Warning: unable to write answers cache ({answers_cache_path}): {e}")

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload,
    }
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
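    # status_update only reaches the server logs; the UI textbox shows the
    # value returned at the end (the function does not stream progress).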

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df

# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the "submit" button, it can take quite some time (this is how long the agent needs to work through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the delay of the submit button, a solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(
        label="Run Status / Submission Result", lines=5, interactive=False
    )
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
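    # No explicit inputs above: Gradio injects the gr.OAuthProfile argument
    # automatically from run_and_submit_all's type annotation when the user is
    # logged in via gr.LoginButton (and passes None otherwise).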

if __name__ == "__main__":
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(
            f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
        )
    else:
        print(
            "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
        )

    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False, ssr_mode=False)