Ghost-Coder / app.py
muhammadtlha944's picture
Update app.py
b84fbac verified
raw
history blame
3.82 kB
import gradio as gr
import requests
import time
# --- CONFIGURATION ---
# NOTE(review): DROPLET_IP is never referenced in this file, and its host
# (134.199.195.151) differs from the host embedded in API_URL
# (134-199-192-140) — confirm which droplet is current.
DROPLET_IP = "134.199.195.151"
# Pinggy tunnel endpoint that forwards to the translation bridge on the GPU box.
API_URL = "https://djrbe-134-199-192-140.run.pinggy-free.link/translate"
# Hackathon project page (shown in the status banner).
# NOTE(review): the banner HTML below hardcodes this same URL instead of
# using the constant — keep the two in sync.
LABLAB_URL = "https://lablab.ai/ai-hackathons/amd-developer/amd-ghost-coder-cuda-to-rocm-ai-migration/ghost-coder-autonomous-cuda-to-hip-agent"
# --- LOGIC ---
def ghost_translate(cuda_code):
    """Translate CUDA source to AMD HIP by calling the remote bridge API.

    Generator used for Gradio streaming: each ``yield`` is a
    ``(status_message, code_or_log)`` pair routed to the ``logs`` textbox
    and the ``output_code`` widget respectively.

    Args:
        cuda_code: CUDA C++ source pasted by the user.

    Yields:
        tuple[str, str]: (agent status line, HIP code or placeholder text).
    """
    yield "👻 Ghost-Coder: Analyzing CUDA Kernel...", "Loading..."
    try:
        # The marker lets us strip an echoed prompt from the model output.
        split_marker = "// --- GHOST-CODER HIP OUTPUT ---"
        formatted_prompt = f"""Task: Translate the following CUDA code to AMD HIP.
Output ONLY valid C++ code. Do not include markdown blocks or explanations.
// --- ORIGINAL CUDA CODE ---
{cuda_code}
{split_marker}
"""
        response = requests.post(API_URL, json={"code": formatted_prompt}, timeout=120)
        if response.status_code == 200:
            raw_response = response.json().get("hip_code", "")
            # The model may echo the whole prompt back; keep only what
            # follows the marker, falling back to the full text otherwise.
            if split_marker in raw_response:
                hip_code = raw_response.split(split_marker)[-1].strip()
            else:
                hip_code = raw_response.strip()
            # Cosmetic staged updates so the UI shows agent "progress".
            yield "🔄 Analyzing HIP logic on ROCm stack...", "Generating..."
            time.sleep(1)
            yield "🛠️ Verifying syntax and memory offsets...", "Verifying..."
            time.sleep(1)
            yield "✅ Self-Healing successful! HIP Code generated.", hip_code
        else:
            yield f"❌ Droplet Error: {response.status_code}", "// Check bridge logs on MI300X"
    except Exception as e:
        # Broad catch is deliberate: any failure (connection, timeout, JSON
        # decode) should surface in the UI rather than crash the worker.
        # BUG FIX: original used an f-string with no placeholders here.
        yield "❌ Connection Error: Ensure bridge is running", str(e)
# --- UI DESIGN ---
# FIX: `theme` is a gr.Blocks() constructor argument in Gradio — it was
# previously (incorrectly) passed to launch(), where it has no effect and
# raises TypeError on current releases.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Row():
        # Static banner explaining that the GPU backend is paused.
        # FIX: use the LABLAB_URL constant instead of duplicating the URL.
        gr.HTML(f"""
        <div style="background-color: #fff3cd; color: #856404; padding: 20px; border-radius: 8px; border: 1px solid #ffeeba; width: 100%; text-align: left; font-family: sans-serif;">
            <span style="font-size: 1.2em;">⚠️Demo Status: GPU Backend Paused</span><br>
            <p style="margin-top: 5px; color: #856404;">The AMD MI300X instance for this live demo has been paused following the conclusion of the hackathon credit period.</p>
            <a href="{LABLAB_URL}" target="_blank" style="display: inline-block; background-color: #856404; color: white; padding: 10px 20px; text-decoration: none; border-radius: 5px; font-weight: bold; margin-top: 3px;">View Full Video Demo & Technical Pitch</a>
        </div>
        """)
    gr.Markdown("# 👻 Ghost-Coder: Autonomous CUDA-to-HIP Agent")
    gr.Markdown("### Optimized for AMD Instinct™ MI300X | Qwen2.5-Coder-32B")
    with gr.Row():
        with gr.Column():
            input_code = gr.Code(
                label="Paste CUDA Code Here",
                language="cpp",
                lines=15,
                value="// Example CUDA Host Code\nvoid runKernel() {\n    cudaMalloc(&d_A, size);\n    kernel<<<256, 256>>>(d_A);\n}"
            )
            run_btn = gr.Button("Translate & Verify", variant="primary")
        with gr.Column():
            output_code = gr.Code(label="Generated HIP Code", language="cpp", lines=15)
            logs = gr.Textbox(label="Agent Status & Self-Healing Logs", interactive=False)
    # ghost_translate is a generator, so the two outputs stream as it yields.
    run_btn.click(ghost_translate, inputs=[input_code], outputs=[logs, output_code])
# --- LAUNCH CONFIG ---
if __name__ == "__main__":
    # BUG FIX: `css` and `theme` are gr.Blocks() constructor arguments, not
    # launch() arguments — passing them to launch() raises TypeError on
    # current Gradio releases, so the app never started. (The ".banner" CSS
    # rule also matched no element in the UI and was dead anyway.)
    # queue() enables the generator-based streaming used by ghost_translate.
    demo.queue().launch()