Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from fastapi import FastAPI
|
| 3 |
+
from pydantic import BaseModel
|
| 4 |
+
import requests
|
| 5 |
+
import uvicorn
|
| 6 |
+
import os
|
| 7 |
+
|
| 8 |
+
# ==========================================
# CONFIGURATION FOR HUGGING FACE
# ==========================================
# Hugging Face Spaces listens on port 7860 by default.
# Read the port dynamically from the environment variables (fallback to 7860)
# so the same file works locally and inside the HF container.
PORT = int(os.environ.get("PORT", 7860))
| 14 |
+
|
| 15 |
+
# ==========================================
# 1. FASTAPI SETUP (the backend)
# ==========================================

# `app` is the object Hugging Face / uvicorn serves; Gradio is mounted onto it below.
app = FastAPI(
    title="HF Space: Gradio & FastAPI Template",
    description="Ein Template für Hugging Face, das Gradio und FastAPI kombiniert."
)
| 23 |
+
|
| 24 |
+
class InputData(BaseModel):
    """Request body for POST /api/process."""

    # Raw text submitted by the client (the Gradio UI in this template).
    text: str
| 26 |
+
|
| 27 |
+
class OutputData(BaseModel):
    """Response body for POST /api/process."""

    # Greeting message that echoes the submitted text.
    result: str
    # Character count of the submitted text.
    length: int
| 30 |
+
|
| 31 |
+
@app.post("/api/process", response_model=OutputData)
def process_text(data: InputData):
    """Process the submitted text and return a greeting plus its length.

    This is the FastAPI endpoint; your real logic (e.g. ML models) would
    replace the echo below.
    """
    # Echo the input back with a greeting and report its character count.
    return OutputData(
        result=f"Hallo vom HF-Backend! Du hast gesendet: '{data.text}'",
        length=len(data.text),
    )
| 39 |
+
|
| 40 |
+
# ==========================================
# 2. GRADIO FUNCTIONS (the API calls)
# ==========================================

def call_fastapi(user_input: str) -> str:
    """Send the user's text to the local FastAPI backend and format the reply.

    Gradio and FastAPI run inside the same Hugging Face container, so the
    request goes to loopback (127.0.0.1) on the dynamically resolved port.
    """
    endpoint = f"http://127.0.0.1:{PORT}/api/process"

    try:
        resp = requests.post(endpoint, json={"text": user_input}, timeout=10)
        resp.raise_for_status()  # turn HTTP 4xx/5xx into an exception
        body = resp.json()
        return f"Ergebnis: {body['result']}\nZeichenanzahl: {body['length']}"
    except requests.exceptions.RequestException as e:
        # Connection errors, timeouts, bad status codes and (in recent
        # requests versions) JSON decode errors all land here.
        return f"Fehler beim API-Aufruf auf Port {PORT}: {str(e)}"
| 61 |
+
|
| 62 |
+
# ==========================================
# 3. GRADIO UI SETUP (the frontend)
# ==========================================

with gr.Blocks(title="Mein HF FastAPI Client", theme=gr.themes.Soft()) as gradio_app:
    gr.Markdown("# 🚀 Hugging Face: Gradio + FastAPI Template")
    gr.Markdown("Diese UI sendet Anfragen an das integrierte FastAPI-Backend innerhalb des HF Spaces.")

    with gr.Row():
        # Left column: user input and the submit button.
        with gr.Column():
            user_box = gr.Textbox(label="Deine Eingabe", placeholder="Schreibe hier etwas rein...", lines=3)
            send_button = gr.Button("An FastAPI senden", variant="primary")

        # Right column: read-only display of the backend response.
        with gr.Column():
            reply_box = gr.Textbox(label="Antwort von FastAPI", interactive=False, lines=3)

    # Wire the button to the HTTP call against our own backend.
    send_button.click(fn=call_fastapi, inputs=user_box, outputs=reply_box)
| 83 |
+
|
| 84 |
+
# ==========================================
|
| 85 |
+
# 4. APP MOUNTING & SERVER START
|
| 86 |
+
# ==========================================
|
| 87 |
+
|
| 88 |
+
# We mount Gradio into FastAPI. Hugging Face sees the variable `app`
# and starts it correctly on its own.
app = gr.mount_gradio_app(app, gradio_app, path="/")

if __name__ == "__main__":
    # This block only runs when you test the script LOCALLY on your PC.
    # On Hugging Face the platform handles the startup.
    # IMPORTANT: host="0.0.0.0" allows external access (needed for Docker/HF Spaces).
    print(f"Starte Server lokal... Öffne http://127.0.0.1:{PORT} in deinem Browser.")
    uvicorn.run(app, host="0.0.0.0", port=PORT)
|