| import gradio as gr |
| import requests |
| import os |
|
|
| |
# Hugging Face API token, read from the environment; may be None if unset,
# in which case requests are sent unauthenticated (rate-limited).
HF_API_TOKEN = os.getenv("HF_API_TOKEN")


# Hosted Inference API endpoints, one fine-tuned mBERT toxicity model per language.
# Keys must match the choices offered in the UI language dropdown.
MODEL_ENDPOINTS = {
    "Hindi": "https://api-inference.huggingface.co/models/LingoIITGN/mBERT_toxic_hindi",
    "Telugu": "https://api-inference.huggingface.co/models/LingoIITGN/mBERT_toxic_telugu"
}
|
|
| |
def get_toxicity_prediction(text, language):
    """Classify *text* for toxicity via the Hugging Face Inference API.

    Parameters
    ----------
    text : str
        User-supplied text to classify.
    language : str
        Key into MODEL_ENDPOINTS ("Hindi" or "Telugu").

    Returns
    -------
    str
        A human-readable result ("Toxicity Score: ...\\nClassification: ...")
        or an "Error: ..." message suitable for direct display in the UI.
    """
    if language not in MODEL_ENDPOINTS:
        return "Error: Model not found for the selected language"

    # Guard against empty submissions so we don't waste an API call.
    if not text or not text.strip():
        return "Error: Please enter some text"

    url = MODEL_ENDPOINTS[language]
    headers = {
        "Authorization": f"Bearer {HF_API_TOKEN}",
        "Content-Type": "application/json"
    }

    payload = {
        "inputs": text,
        # return_all_scores gives every label's probability, not just the top one.
        "parameters": {
            "return_all_scores": True
        },
        # wait_for_model blocks until the model is loaded instead of failing with 503.
        "options": {
            "wait_for_model": True
        }
    }

    try:
        # A timeout prevents the Gradio UI from hanging forever on a stuck request.
        response = requests.post(url, headers=headers, json=payload, timeout=30)
    except requests.RequestException as exc:
        return f"Error: {exc}"

    if response.status_code != 200:
        return f"Error: {response.text}"

    try:
        predictions = response.json()[0]
    except (ValueError, IndexError, KeyError, TypeError):
        # Malformed or unexpected response body (e.g. an error dict, not a list).
        return f"Error: Unexpected API response: {response.text}"

    toxicity_score = None
    for pred in predictions:
        if pred["label"] == "toxic":
            toxicity_score = pred["score"] * 100
            break

    if toxicity_score is None:
        # No "toxic" label present at all — treat as non-toxic.
        return "Classification: Non-Toxic"

    # BUG FIX: with return_all_scores=True the "toxic" entry is always present,
    # so the original code labelled *every* input "Toxic" even at a 1% score.
    # Classify by comparing the score against a 50% threshold instead.
    classification = "Toxic" if toxicity_score >= 50 else "Non-Toxic"
    return f"Toxicity Score: {toxicity_score:.2f}%\nClassification: {classification}"
|
|
| |
# --- Gradio UI ---------------------------------------------------------------
# Components are rendered in declaration order inside the Blocks context,
# so the statement order below defines the page layout.
with gr.Blocks() as app:
    gr.Markdown("# 🛡️ ToxiGuard - Hindi & Telugu Toxicity Detection")

    # Free-text input plus a language selector that picks the model endpoint.
    text_input = gr.Textbox(label="Enter your text")
    language_dropdown = gr.Dropdown(choices=["Hindi", "Telugu"], label="Select Language", value="Hindi")

    submit_button = gr.Button("Check Toxicity")
    output_text = gr.Textbox(label="Result")

    # Wire the button: (text, language) -> result string shown in output_text.
    submit_button.click(fn=get_toxicity_prediction, inputs=[text_input, language_dropdown], outputs=output_text)


# Bind to all interfaces on port 7860 (the conventional Hugging Face Spaces port).
app.launch(server_name="0.0.0.0", server_port=7860)
|
|