import gradio as gr
import requests
import time
# --- CONFIGURATION ---
# NOTE(review): DROPLET_IP is not referenced anywhere below — confirm it is still needed.
DROPLET_IP = "134.199.195.151"
# Public tunnel URL that forwards to the /translate endpoint on the droplet.
API_URL = "https://djrbe-134-199-192-140.run.pinggy-free.link/translate"
# Your exact Lablab URL
LABLAB_URL = "https://lablab.ai/ai-hackathons/amd-developer/amd-ghost-coder-cuda-to-rocm-ai-migration/ghost-coder-autonomous-cuda-to-hip-agent"
# --- LOGIC ---
def ghost_translate(cuda_code):
    """Translate CUDA source to AMD HIP via the remote bridge, streaming status.

    Generator used as a Gradio event handler: each yield is a
    ``(status_message, code_text)`` pair that updates the log textbox and
    the output code box respectively.

    Args:
        cuda_code: CUDA C++ source pasted by the user.

    Yields:
        tuple[str, str]: progress/status text and the current output
        (placeholder text while working, HIP code or an error on finish).
    """
    yield "👻 Ghost-Coder: Analyzing CUDA Kernel...", "Loading..."
    try:
        # Marker appended to the prompt so any echoed prompt text can be
        # stripped from the model's response below.
        split_marker = "// --- GHOST-CODER HIP OUTPUT ---"
        formatted_prompt = f"""Task: Translate the following CUDA code to AMD HIP.
Output ONLY valid C++ code. Do not include markdown blocks or explanations.
// --- ORIGINAL CUDA CODE ---
{cuda_code}
{split_marker}
"""
        # Generous timeout: a 32B model on the remote GPU can take a while.
        response = requests.post(API_URL, json={"code": formatted_prompt}, timeout=120)
        if response.status_code == 200:
            raw_response = response.json().get("hip_code", "")
            # Keep only the text after the marker when the backend echoes
            # the prompt; otherwise use the whole response.
            if split_marker in raw_response:
                hip_code = raw_response.split(split_marker)[-1].strip()
            else:
                hip_code = raw_response.strip()
            yield "🔄 Analyzing HIP logic on ROCm stack...", "Generating..."
            time.sleep(1)  # brief pause so the staged status updates are visible
            yield "🛠️ Verifying syntax and memory offsets...", "Verifying..."
            time.sleep(1)
            yield "✅ Self-Healing successful! HIP Code generated.", hip_code
        else:
            yield f"❌ Droplet Error: {response.status_code}", "// Check bridge logs on MI300X"
    except Exception as e:
        # UI boundary: surface any failure (connection, JSON decode, ...)
        # to the user instead of crashing the Gradio handler.
        yield "❌ Connection Error: Ensure bridge is running", str(e)
# --- UI DESIGN ---
# BUG FIX: `css` and `theme` were previously passed to demo.launch(), which
# accepts neither keyword and raises TypeError. They belong on gr.Blocks().
with gr.Blocks(
    theme=gr.themes.Soft(),
    css=".banner { margin-bottom: 20px; }",
) as demo:
    with gr.Row():
        # Status banner; links to the recorded demo via the LABLAB_URL
        # constant instead of duplicating the URL inline.
        gr.HTML(f"""
        <div style="background-color: #fff3cd; color: #856404; padding: 20px; border-radius: 8px; border: 1px solid #ffeeba; width: 100%; text-align: left; font-family: sans-serif;">
            <span style="font-size: 1.2em;">⚠️Demo Status: GPU Backend Paused</span><br>
            <p style="margin-top: 5px; color: #856404;">The AMD MI300X instance for this live demo has been paused following the conclusion of the hackathon credit period.</p>
            <a href="{LABLAB_URL}" target="_blank" style="display: inline-block; background-color: #856404; color: white; padding: 10px 20px; text-decoration: none; border-radius: 5px; font-weight: bold; margin-top: 3px;">View Full Video Demo & Technical Pitch</a>
        </div>
        """)
    gr.Markdown("# 👻 Ghost-Coder: Autonomous CUDA-to-HIP Agent")
    gr.Markdown("### Optimized for AMD Instinct™ MI300X | Qwen2.5-Coder-32B")
    with gr.Row():
        with gr.Column():
            input_code = gr.Code(
                label="Paste CUDA Code Here",
                language="cpp",
                lines=15,
                value="// Example CUDA Host Code\nvoid runKernel() {\n    cudaMalloc(&d_A, size);\n    kernel<<<256, 256>>>(d_A);\n}"
            )
            run_btn = gr.Button("Translate & Verify", variant="primary")
        with gr.Column():
            output_code = gr.Code(label="Generated HIP Code", language="cpp", lines=15)
            logs = gr.Textbox(label="Agent Status & Self-Healing Logs", interactive=False)
    # ghost_translate is a generator, so logs/output stream as it yields.
    run_btn.click(ghost_translate, inputs=[input_code], outputs=[logs, output_code])

# --- LAUNCH CONFIG ---
if __name__ == "__main__":
    # queue() is required for generator (streaming) event handlers.
    demo.queue().launch()