Sync from GitHub via hub-sync

Files changed:
- VERSION +1 -1
- main.py +53 -21
- requirements.txt +1 -0
VERSION CHANGED

@@ -1 +1 @@
-
+cc68171513c91148b7ac59a8fd45a07f5d01798d

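Aside: the full SHA written to VERSION is what the Space surfaces through `_build_label()` (unchanged in this commit, so only its return line appears as context in the main.py diff below). A minimal sketch of how such a helper could read it — the file layout, fallback, and helper name here are hypothetical, not the committed implementation:

# Hypothetical sketch only: deriving a label like the one shown as diff
# context in main.py. Path handling and fallbacks are assumptions.
import datetime as dt
from pathlib import Path

def build_label_sketch() -> str:
    version_file = Path(__file__).with_name("VERSION")
    version = version_file.read_text().strip() if version_file.exists() else "unknown"
    short_commit = version[:7]  # short form of the full SHA stored in VERSION
    deployed_at = dt.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
    return f"Version: {version} | Commit: {short_commit} | Loaded: {deployed_at}"
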
main.py CHANGED

@@ -3,6 +3,7 @@ import os
 from pathlib import Path
 
 import gradio as gr
+from huggingface_hub import InferenceClient
 
 
 def _build_label() -> str:
@@ -24,36 +25,67 @@ def _build_label() -> str:
     return f"Version: {version} | Commit: {short_commit} | Loaded: {deployed_at}"
 
 
-
-
-
-        return "Type something to test your CI/CD sync."
+# Initialize HF Inference Client with Z.ai provider (via HF_TOKEN)
+client = InferenceClient()
+MODEL = "zai-org/GLM-5.1"
 
-    now = dt.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
-    return (
-        f"You typed: {clean}\n"
-        f"Length: {len(clean)} characters\n"
-        f"Built from GitHub sync demo\n"
-        f"Timestamp: {now}"
-    )
 
+def chat_response(message: str, history: list) -> str:
+    """
+    Send a message to the GLM-5.1 model and get a response.
+    history is a list of [user_msg, assistant_msg] pairs.
+    """
+    if not message or not message.strip():
+        return "Please enter a message."
+
+    try:
+        # Convert Gradio chat history format to messages for the API
+        messages = []
+        for user_msg, assistant_msg in history:
+            messages.append({"role": "user", "content": user_msg})
+            if assistant_msg:
+                messages.append({"role": "assistant", "content": assistant_msg})
+
+        # Add current user message
+        messages.append({"role": "user", "content": message})
+
+        # Call the model via HF Inference API
+        response = client.chat_completion(
+            model=MODEL,
+            messages=messages,
+            max_tokens=512,
+        )
+
+        return response.choices[0].message.content
+    except Exception as e:
+        return f"Error: {str(e)}"
 
-
-
+
+with gr.Blocks(title="GitHub + HuggingFace + AI Chat Demo") as demo:
+    gr.Markdown("# GitHub → HuggingFace → AI Chat")
     gr.Markdown(f"**{_build_label()}**")
     gr.Markdown(
-        "
+        f"Powered by **{MODEL}** via Z.ai on HuggingFace. Push to GitHub, auto-syncs here."
     )
 
-
-
-
-
+    gr.ChatInterface(
+        chat_response,
+        examples=[
+            "What is the capital of France?",
+            "Explain quantum computing in simple terms.",
+            "Write a short poem about the moon.",
+        ],
+        title=None,
+        description="Ask me anything!",
     )
-    output = gr.Textbox(label="Result", lines=6)
-    run_btn = gr.Button("Run")
 
-
+
+if __name__ == "__main__":
+    # server_name="0.0.0.0" is required inside HF Space containers.
+    # root_path ensures Gradio resolves JS/CSS assets correctly when running
+    # behind a reverse proxy or custom domain.
+    _root_path = os.getenv("GRADIO_ROOT_PATH", "").rstrip("/")
+    demo.launch(server_name="0.0.0.0", root_path=_root_path)
 
 
 if __name__ == "__main__":

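Before relying on the Space UI, the inference path added above can be smoke-tested locally. A minimal sketch, assuming `HF_TOKEN` is exported in the environment and that the committed model id is reachable through the Inference API:

# Local smoke test for the chat_completion call added in main.py.
# Assumes HF_TOKEN is set; InferenceClient picks it up automatically.
from huggingface_hub import InferenceClient

client = InferenceClient()
response = client.chat_completion(
    model="zai-org/GLM-5.1",  # model id as committed in main.py
    messages=[{"role": "user", "content": "Reply with one short sentence."}],
    max_tokens=64,
)
print(response.choices[0].message.content)
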
requirements.txt CHANGED

@@ -1 +1,2 @@
 gradio>=5.49.1
+huggingface-hub>=0.24.0