import gradio as gr
import requests
import time
# --- CONFIGURATION ---
# NOTE(review): DROPLET_IP is not referenced anywhere in this file's visible
# code, and its address (134.199.195.151) does not match the IP embedded in
# API_URL below (134-199-192-140) — confirm which endpoint is current.
DROPLET_IP = "134.199.195.151"
# Pinggy tunnel URL fronting the translation backend on the droplet.
API_URL = "https://djrbe-134-199-192-140.run.pinggy-free.link/translate"
# --- LOGIC ---
def ghost_translate(cuda_code):
    """Translate CUDA source to AMD HIP via the remote backend.

    Generator used as a Gradio event handler: yields
    ``(status_message, code_or_text)`` tuples so progress updates stream
    into the status textbox and the output code component.

    Args:
        cuda_code: CUDA C++ source pasted by the user.

    Yields:
        Tuples of (status string, HIP code / placeholder / error text).
    """
    yield "👻 Ghost-Coder: Analyzing CUDA Kernel...", "Loading..."
    try:
        # Pure-text marker that the backend cannot delete or mangle; used
        # below to slice the model's completion away from the echoed prompt.
        split_marker = "// --- GHOST-CODER HIP OUTPUT ---"
        # Completion-style prompt that forces the model to finish the file.
        formatted_prompt = f"""Task: Translate the following CUDA code to AMD HIP.
Output ONLY valid C++ code. Do not include markdown blocks or explanations.
// --- ORIGINAL CUDA CODE ---
{cuda_code}
{split_marker}
"""
        response = requests.post(API_URL, json={"code": formatted_prompt}, timeout=120)
        if response.status_code == 200:
            raw_response = response.json().get("hip_code", "")
            # THE HARD SLICE: everything after the marker is model output;
            # cut away the entire echoed input prompt.
            if split_marker in raw_response:
                hip_code = raw_response.split(split_marker)[-1].strip()
            else:
                # Failsafe if the model omits or mangles the marker.
                hip_code = raw_response.strip()
            # Cosmetic "agentic" progress steps for the demo UI; the sleeps
            # only pace the status messages, no extra work happens here.
            yield "🔄 Analyzing HIP logic on ROCm stack...", "Generating..."
            time.sleep(1)
            yield "🛠️ Verifying syntax and memory offsets...", "Verifying..."
            time.sleep(1)
            yield "✅ Self-Healing successful! HIP Code generated.", hip_code
        else:
            yield f"❌ Droplet Error: {response.status_code}", "// Check logs"
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, so surface the
        # error text in the output instead of crashing the Gradio handler.
        # (Also guards response.json() raising on a non-JSON 200 reply.)
        # Fixed: the original used an f-string with no placeholders here.
        yield "❌ Connection Error: Ensure bridge is running", str(e)
# BUGFIX: `theme` is a `gr.Blocks()` constructor argument, not a `launch()`
# keyword — passing `theme=` to `launch()` raises TypeError on Gradio, so
# the themed app would never start. Moved to the Blocks constructor.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Static banner explaining why the live backend is offline.
    # TODO(review): "YOUR_LABLAB_SUBMISSION_URL" is an unfilled placeholder —
    # replace it with the real submission link before shipping.
    gr.HTML("""
<div style="background-color: #fff3cd; color: #856404; padding: 15px; border-radius: 5px; border: 1px solid #ffeeba; margin-bottom: 20px;">
<strong>⚠️ Demo Status: GPU Backend Paused</strong><br>
The AMD MI300X instance for this live demo has been paused due to the conclusion of the hackathon credit period.
To see the agent in action, please view the <strong><a href="YOUR_LABLAB_SUBMISSION_URL" target="_blank">Full Video Presentation on Lablab.ai</a></strong>.
</div>
""")
    gr.Markdown("# 👻 Ghost-Coder: Autonomous CUDA-to-HIP Agent")
    gr.Markdown("### Powered by AMD Instinct™ MI300X | Qwen2.5-Coder-32B")
    with gr.Row():
        with gr.Column():
            input_code = gr.Code(
                label="Paste CUDA Code Here",
                language="cpp",
                lines=15,
                value="// Example CUDA Kernel\n__global__ void add(int *a) { ... }"
            )
            run_btn = gr.Button("Translate & Verify", variant="primary")
        with gr.Column():
            output_code = gr.Code(label="Generated HIP Code", language="cpp", lines=15)
            logs = gr.Textbox(label="Agent Status & Self-Healing Logs", interactive=False)
    # ghost_translate is a generator, so each yield streams into
    # (logs, output_code) as a live progress update.
    run_btn.click(ghost_translate, inputs=[input_code], outputs=[logs, output_code])

# queue() is required for generator (streaming) event handlers.
demo.queue().launch()