File size: 3,322 Bytes
ebe34f5
9a7c864
e828f4e
ebe34f5
e828f4e
ebe34f5
 
 
 
 
 
 
 
 
9a7c864
ebe34f5
 
 
9a7c864
ebe34f5
 
 
9a7c864
ebe34f5
9a7c864
 
ebe34f5
 
 
 
 
 
 
 
 
 
 
 
9a7c864
ebe34f5
 
9a7c864
ebe34f5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9a7c864
 
 
 
 
 
 
 
 
 
 
 
ebe34f5
9a7c864
 
 
 
ebe34f5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e828f4e
9a7c864
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import os
import asyncio
import gradio as gr
from zhipuai import ZhipuAI

# === GLM setup ===
api_key = os.getenv("ZHIPUAI_API_KEY")

# Emit a startup diagnostic so a missing credential is obvious in the logs.
if api_key:
    print("βœ… [Startup] ZHIPUAI_API_KEY loaded successfully.")
else:
    print("❌ [Startup Error] Missing ZHIPUAI_API_KEY in environment variables.")

# Client stays None when the key is absent; callers must guard against that.
zhipu_client = ZhipuAI(api_key=api_key) if api_key else None


# === Helper: call GLM ===
def glm_call(prompt: str, model: str = "glm-4-0520") -> str:
    """Send *prompt* to the GLM chat-completions API and return the reply text.

    Args:
        prompt: User message, forwarded verbatim as a single-turn chat.
        model: GLM model identifier. The default preserves the previously
            hard-coded "glm-4-0520", so existing callers are unaffected.

    Returns:
        The assistant's reply text, or a human-readable error string. This
        function never raises — its return value is rendered directly in
        the Gradio UI.
    """
    if not zhipu_client:
        return "❌ GLM client not initialized (missing API key)."
    try:
        resp = zhipu_client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
        # The SDK returns objects, not dicts: attribute access is required.
        return resp.choices[0].message.content
    except Exception as e:
        # Broad catch is intentional: any SDK/network failure becomes a
        # visible message in the UI instead of a crash.
        return f"[GLM Error] {e}"


# === Agent Implementations ===
def autogen_agent(prompt: str) -> str:
    """Run *prompt* through GLM, labelled as the Autogen agent."""
    reply = glm_call(prompt)
    return f"[Autogen Agent] {reply}"

def langgraph_agent(prompt: str) -> str:
    """Run *prompt* through GLM, labelled as the LangGraph agent."""
    reply = glm_call(prompt)
    return f"[LangGraph Agent] {reply}"

def crewai_agent(prompt: str) -> str:
    """Placeholder CrewAI agent — not yet wired to GLM; *prompt* is ignored."""
    stub_notice = "[CrewAI Agent] ❌ Not connected to GLM (stubbed for now)."
    return stub_notice

def opendevin_agent(prompt: str) -> str:
    """Placeholder OpenDevin agent — not yet wired to GLM; *prompt* is ignored."""
    stub_notice = "[OpenDevin Agent] ❌ Not connected to GLM (stubbed for now)."
    return stub_notice


# === Gradio Handlers ===
def single_agent(agent: str, prompt: str) -> str:
    """Dispatch *prompt* to the agent named by the UI dropdown selection.

    Returns the selected agent's response, or an error string when *agent*
    is not one of the four known names.
    """
    if agent == "Autogen":
        return autogen_agent(prompt)
    if agent == "LangGraph":
        return langgraph_agent(prompt)
    if agent == "CrewAI":
        return crewai_agent(prompt)
    if agent == "OpenDevin":
        return opendevin_agent(prompt)
    return "[Error] Unknown agent selected."


# Async version for running all agents concurrently
async def run_all_agents(prompt: str):
    """Run every agent on *prompt* concurrently and join their replies.

    The agent functions are blocking (synchronous HTTP calls), so each one
    is pushed onto the default thread-pool executor. ``gather`` preserves
    task order, so the combined output order is deterministic.

    Returns:
        All four agent responses joined by blank lines.
    """
    # FIX: asyncio.get_event_loop() inside a coroutine is deprecated since
    # Python 3.10; get_running_loop() is the correct call from async code.
    loop = asyncio.get_running_loop()
    tasks = [
        loop.run_in_executor(None, autogen_agent, prompt),
        loop.run_in_executor(None, langgraph_agent, prompt),
        loop.run_in_executor(None, crewai_agent, prompt),
        loop.run_in_executor(None, opendevin_agent, prompt),
    ]
    results = await asyncio.gather(*tasks)
    return "\n\n".join(results)

def all_agents(prompt: str) -> str:
    """Synchronous Gradio entry point wrapping the async agent fan-out."""
    try:
        combined = asyncio.run(run_all_agents(prompt))
    except Exception as e:
        return f"[Error running all agents] {e}"
    return combined


# === Gradio UI ===
def build_ui():
    """Assemble the two-tab Gradio Blocks app and return it (not launched)."""
    with gr.Blocks() as demo:
        gr.Markdown("## πŸ€– Multi-Agent GLM Playground")

        # Tab 1: pick one agent and run it.
        with gr.Tab("Single Agent"):
            agent_choice = gr.Dropdown(
                ["Autogen", "LangGraph", "CrewAI", "OpenDevin"],
                label="Choose an agent"
            )
            single_prompt = gr.Textbox(label="Your prompt")
            single_output = gr.Textbox(label="Agent response")
            gr.Button("Run").click(
                fn=single_agent,
                inputs=[agent_choice, single_prompt],
                outputs=single_output,
            )

        # Tab 2: fan the prompt out to every agent at once.
        with gr.Tab("All Agents"):
            fanout_prompt = gr.Textbox(label="Your prompt")
            fanout_output = gr.Textbox(label="Responses")
            gr.Button("Run All").click(
                fn=all_agents,
                inputs=fanout_prompt,
                outputs=fanout_output,
            )

    return demo


if __name__ == "__main__":
    # Bind to all interfaces; honour a PORT override from the environment.
    port = int(os.environ.get("PORT", "7860"))
    build_ui().launch(server_name="0.0.0.0", server_port=port)