import gradio as gr
import requests
import time
# --- CONFIGURATION ---
# Replace with your actual URLs
# Public IP of the droplet hosting the translation bridge.
# NOTE(review): not referenced anywhere in this file's visible code — confirm
# it is still needed before removing.
DROPLET_IP = "134.199.195.151"
# Tunnel endpoint exposing the bridge's /translate route (POSTed to by ghost_translate).
API_URL = "https://djrbe-134-199-192-140.run.pinggy-free.link/translate"
# Hackathon submission page linked from the header banner.
LABLAB_URL = "https://lablab.ai/event/amd-ai-hackathon/ghost-coder" # Update this link!
# --- LOGIC ---
def ghost_translate(cuda_code):
    """Translate CUDA source to AMD HIP via the remote bridge service.

    Generator used as a streaming Gradio callback: each ``yield`` is a
    ``(status_message, code_or_text)`` pair routed to the status textbox
    and the HIP output editor respectively.

    Parameters
    ----------
    cuda_code : str
        Raw CUDA source pasted by the user.

    Yields
    ------
    tuple[str, str]
        Progress updates, then the final HIP code — or an error message
        when the backend is unreachable or returns a non-200 status.
    """
    # Guard: don't make a network round-trip for an empty paste.
    if not cuda_code or not cuda_code.strip():
        yield "⚠️ Please paste CUDA code to translate.", ""
        return

    yield "👻 Ghost-Coder: Analyzing CUDA Kernel...", "Loading..."
    try:
        # Marker the model is asked to echo so we can slice the prompt off
        # the backend's response.
        split_marker = "// --- GHOST-CODER HIP OUTPUT ---"
        formatted_prompt = f"""Task: Translate the following CUDA code to AMD HIP.
Output ONLY valid C++ code. Do not include markdown blocks or explanations.
// --- ORIGINAL CUDA CODE ---
{cuda_code}
{split_marker}
"""
        response = requests.post(API_URL, json={"code": formatted_prompt}, timeout=120)
        if response.status_code == 200:
            raw_response = response.json().get("hip_code", "")
            # Hard slice: keep only what follows the marker so prompt text
            # never leaks into the output pane.
            if split_marker in raw_response:
                hip_code = raw_response.split(split_marker)[-1].strip()
            else:
                hip_code = raw_response.strip()
            # Staged status updates ("agentic" visual feedback) before the
            # final result; the sleeps are purely cosmetic pacing.
            yield "🔄 Analyzing HIP logic on ROCm stack...", "Generating..."
            time.sleep(1)
            yield "🛠️ Verifying syntax and memory offsets...", "Verifying..."
            time.sleep(1)
            yield "✅ Self-Healing successful! HIP Code generated.", hip_code
        else:
            yield f"❌ Droplet Error: {response.status_code}", "// Check bridge logs on MI300X"
    except Exception as e:
        # Broad catch is deliberate: any failure (DNS, timeout, bad JSON)
        # must surface in the UI rather than crash the Gradio worker.
        yield "❌ Connection Error: Ensure bridge is running", str(e)
# --- UI DESIGN ---
# Declarative Gradio layout; `demo` is launched from the __main__ guard below.
with gr.Blocks(css=".banner { margin-bottom: 20px; }") as demo:
    # Header Banner - Wrapped for maximum compatibility
    with gr.Row():
        # Static notice that the GPU backend is paused post-hackathon.
        # The HTML lines inside the f-string are deliberately unindented so
        # the rendered markup stays byte-identical.
        gr.HTML(f"""
<div style="background-color: #fff3cd; color: #856404; padding: 20px; border-radius: 8px; border: 1px solid #ffeeba; width: 100%; text-align: center; font-family: sans-serif;">
<span style="font-size: 1.2em;">⚠️ <strong>Demo Status: GPU Backend Paused</strong></span><br>
<p style="margin-top: 10px;">The AMD MI300X instance for this live demo has been paused following the conclusion of the hackathon credit period.</p>
<a href="{LABLAB_URL}" target="_blank" style="display: inline-block; background-color: #856404; color: white; padding: 10px 20px; text-decoration: none; border-radius: 5px; font-weight: bold; margin-top: 5px;">View Full Video Demo & Technical Pitch</a>
</div>
""")
    gr.Markdown("# 👻 Ghost-Coder: Autonomous CUDA-to-HIP Agent")
    gr.Markdown("### Optimized for AMD Instinct™ MI300X | Qwen2.5-Coder-32B")
    # Two-column workspace: CUDA input on the left, HIP output + logs on the right.
    with gr.Row():
        with gr.Column():
            # Pre-filled example so first-time visitors see a runnable sample.
            input_code = gr.Code(
                label="Paste CUDA Code Here",
                language="cpp",
                lines=15,
                value="// Example CUDA Host Code\nvoid runKernel() {\n cudaMalloc(&d_A, size);\n kernel<<<256, 256>>>(d_A);\n}"
            )
            run_btn = gr.Button("Translate & Verify", variant="primary")
        with gr.Column():
            output_code = gr.Code(label="Generated HIP Code", language="cpp", lines=15)
            logs = gr.Textbox(label="Agent Status & Self-Healing Logs", interactive=False)
    # UI Action
    # ghost_translate is a generator, so each yielded (status, code) pair
    # streams into (logs, output_code) in turn.
    run_btn.click(ghost_translate, inputs=[input_code], outputs=[logs, output_code])
# Launch configuration
if __name__ == "__main__":
    # NOTE(review): queue() is presumably required for the streaming
    # generator callback — confirm against the Gradio docs for the pinned
    # version. show_api=False hides the auto-generated API page.
    demo.queue().launch(show_api=False)