# Final_Assignment_Template / gradio_functions.py
from globals import *
from langgraph_agent import *
from PIL import Image
import io
# Validation split of GAIA level 1; questions are looked up by index in the UI callbacks below.
train_dataset = load_dataset("gaia-benchmark/GAIA", '2023_level1', split="validation")
# Build the LangGraph workflow (from langgraph_agent) and compile it into the runnable agent.
builder, builder_name = workflow_tools()
alfred = builder.compile()
def toggle_textbox(show):
    """Return a Gradio update that makes the textbox visible iff *show* is falsy."""
    visible = not show
    return gr.update(visible=visible)
def slider_release_func(q_num: int):
    """Look up dataset item *q_num* and return its fields for the UI.

    Returns a 5-tuple: (question text, reference final answer, attached
    file name, human-readable annotator metadata, annotator tools list).
    """
    entry = train_dataset[q_num]
    metadata = entry['Annotator Metadata']
    # One "key: \n value \n---" section per metadata entry, newline-separated.
    formatted = '\n'.join(
        f'{key}: \n{value} \n---' for key, value in metadata.items()
    )
    return (
        entry['Question'],
        entry['Final answer'],
        entry['file_name'],
        formatted.strip(),
        metadata['Tools'],
    )
def process_output(response):
    """Extract the final answer from the agent's last message.

    *response* is the graph state dict; the last message's ``content`` is
    scanned for the ``"FINAL ANSWER: "`` marker.

    Returns a (full_text, answer) pair. When the marker is absent, the
    answer slot carries the full text prefixed with ``'WITH ERROR: '``.
    """
    text = response['messages'][-1].content
    marker = "FINAL ANSWER: "
    position = text.find(marker)
    if position == -1:
        return text, f'WITH ERROR: {text}'
    answer = text[position + len(marker):].lstrip()
    return text, answer
def get_agent_answer_train(q_num: int):
    """Run the agent on training-set question *q_num*.

    Looks up the dataset item and delegates to ``ask_alfred``, so the
    invocation logic lives in exactly one place.

    Returns the (full_text, extracted_answer) pair from ``process_output``.
    """
    # Dead commented-out invocation code (superseded by ask_alfred) removed.
    item = train_dataset[q_num]
    return ask_alfred(question=item['Question'], file_name=item['file_name'])
def ask_alfred(question: str, file_name: str):
    """Invoke the compiled agent graph on *question* and parse its output.

    *file_name* is forwarded into the graph state so downstream tools can
    load the question's attachment (may be an empty string when the item
    has no file). ``final_output_is_good`` is seeded False; the graph is
    expected to flip it when the answer passes its own check.

    Returns the (full_text, extracted_answer) pair from ``process_output``.
    """
    response = alfred.invoke({
        'messages': [HumanMessage(content=question)],
        'file_name': file_name,
        'final_output_is_good': False,
    })
    # Dead commented-out alternative invocation removed.
    return process_output(response)
def show_langgraph_structure():
    """Render the compiled agent graph as a PIL image (Mermaid-drawn PNG)."""
    graph_png = alfred.get_graph().draw_mermaid_png()
    buffer = io.BytesIO(graph_png)
    return Image.open(buffer)