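# Historical Claim Verifier (RAG-based): retrieves Wikipedia summaries as
# context, then asks a free instruction-tuned LLM (Flan-T5) whether a
# historical claim is true or false, with an explanation.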
import gradio as gr
import wikipedia
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
|
|
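# Retrieval step: pull short Wikipedia summaries to ground the model.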
def search_wikipedia(query):
    try:
        page_titles = wikipedia.search(query, results=2)
        summaries = []
        for title in page_titles:
            try:
                summaries.append(wikipedia.summary(title, sentences=3))
            except (wikipedia.DisambiguationError, wikipedia.PageError):
                continue  # skip ambiguous or missing pages instead of failing the whole search
        return "\n\n".join(summaries)
    except Exception as e:
        return f"Wikipedia search error: {str(e)}"
|
|
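# Load the tokenizer and model once at startup and wrap them in a
# text2text-generation pipeline (PyTorch backend).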
model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

llm_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer, framework="pt")
|
|
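# RAG step: retrieve context for the claim, then prompt the LLM to judge
# the claim against that context.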
def verify_claim(claim):
    context = search_wikipedia(claim)
    # Check for our explicit error prefix rather than any occurrence of the
    # word "error", which could appear in a legitimate article summary.
    if context.strip() == "" or context.startswith("Wikipedia search error"):
        return "Could not retrieve relevant information. Please try a different claim."

    prompt = f"Claim: {claim}\n\nContext: {context}\n\nIs this claim true or false? Explain."
    response = llm_pipeline(prompt, max_length=512, do_sample=False)[0]['generated_text']
    return response
|
|
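# Gradio UI: a single text box in, verification text out.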
demo = gr.Interface(
    fn=verify_claim,
    inputs=gr.Textbox(label="Enter a historical claim", placeholder="e.g., Alexander the Great died in 1971."),
    outputs=gr.Textbox(label="Claim Verification Output"),
    title="Historical Claim Verifier (RAG-Based)",
    description="Uses Wikipedia + a free LLM to verify if a historical claim is true or false, and explains why.",
)
|
|
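# Start the local web app.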
demo.launch()
|
|