"""Codette demo — multi-perspective reasoning chat UI built on Gradio.

Pipeline per chat turn: guardian input check -> nexus risk scan -> fan out to
perspective agents -> AEGIS ethics scoring -> synthesis -> chat + metrics +
perspective graph.  Both the forge stack and the HF client are optional; the
app degrades to deterministic placeholders when they are unavailable.
"""

import os

import numpy as np
import gradio as gr
import plotly.graph_objects as go

# =========================
# SAFE IMPORTS (FORGE STACK)
# =========================
# BUG FIX: the original set HAS_FORGE = True *before* the try block and left
# it True when the import failed, so every later `if HAS_FORGE:` branch would
# raise NameError on `forge`/`aegis`/`nexus`/`guardian`.  Start at False and
# flip to True only after all four engines construct successfully.
HAS_FORGE = False
try:
    from reasoning_forge.forge_engine import ForgeEngine
    from reasoning_forge.aegis import AEGIS
    from reasoning_forge.nexus import NexusSignalEngine
    from reasoning_forge.guardian import CodetteGuardian

    forge = ForgeEngine()
    aegis = AEGIS()
    nexus = NexusSignalEngine()
    guardian = CodetteGuardian()
    HAS_FORGE = True
except Exception as e:
    print(f"[WARN] Forge stack unavailable: {e}")

# =========================
# HF CLIENT (OPTIONAL)
# =========================
HAS_LLM = False
try:
    from huggingface_hub import InferenceClient, login

    HF_TOKEN = os.environ.get("HF_TOKEN", "")
    if HF_TOKEN:
        try:
            login(token=HF_TOKEN)
        except Exception:
            # Invalid/expired token: clear it and continue anonymously.
            HF_TOKEN = ""
    client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct")
    HAS_LLM = True
except Exception:
    # huggingface_hub missing or client construction failed; LLM stays off.
    pass

# =========================
# PERSPECTIVES
# =========================
PERSPECTIVES = ["newton", "davinci", "empathy", "philosophy", "quantum"]


def auto_select(query):
    """Choose which perspectives to run for *query* in "Auto" mode.

    Currently a placeholder returning the first four perspectives regardless
    of the query; the argument is kept so a smarter selector can drop in.
    """
    return PERSPECTIVES[:4]


# =========================
# AGENT CALL
# =========================
def call_agent(name, query):
    """Run the perspective agent *name* on *query*.

    Returns the agent's analysis when the forge stack yields a non-blank
    string; otherwise a deterministic placeholder echoing the first 80
    characters of the query, so the pipeline never stalls.
    """
    if HAS_FORGE:
        try:
            agent_map = {
                "newton": forge.newton,
                "davinci": forge.davinci,
                "empathy": forge.empathy,
                "philosophy": forge.philosophy,
                "quantum": forge.quantum,
            }
            agent = agent_map.get(name)
            if agent:
                result = agent.analyze(query)
                # Only trust a real, non-blank string; anything else falls
                # through to the placeholder below.
                if isinstance(result, str) and result.strip():
                    return result
        except Exception as e:
            print(f"[Agent Error] {name}: {e}")
    return f"[{name.upper()}] {query[:80]}"


# =========================
# SYNTHESIS
# =========================
def synthesize(responses, query):
    """Fuse per-perspective *responses* (dict name -> text) for *query*.

    Uses the forge critic + synthesis engines when available; on any failure
    (or when they return a blank/non-string result) falls back to a plain
    joined summary of the raw responses.
    """
    if HAS_FORGE:
        try:
            critique = forge.critic.evaluate_ensemble(query, responses)
            result = forge.synthesis.synthesize(
                concept=query,
                analyses=responses,
                critique=critique
            )
            if isinstance(result, str) and result.strip():
                return result
        except Exception as e:
            print(f"[Synthesis Error]: {e}")
    joined = "\n".join([f"{k}: {v}" for k, v in responses.items()])
    return f"🔀 SYNTHESIS\n\n{joined}"


# =========================
# VISUAL GRAPH
# =========================
def build_graph(active):
    """Plot the *active* perspective names as labeled points on a unit circle.

    Returns a dark-themed Plotly figure; an empty list yields an annotated
    "No active perspectives" figure instead of a blank plot.
    """
    fig = go.Figure()
    n = len(active)
    if n == 0:
        fig.add_annotation(
            text="No active perspectives", x=0.5, y=0.5, showarrow=False
        )
        return fig
    x, y = [], []
    for i in range(n):
        angle = (2 * np.pi * i) / n
        x.append(np.cos(angle))
        y.append(np.sin(angle))
    fig.add_trace(go.Scatter(
        x=x, y=y, mode='markers+text',
        text=active, textposition="top center"
    ))
    fig.update_layout(
        height=400,
        paper_bgcolor="#0b0b17",
        plot_bgcolor="#0b0b17",
        font=dict(color="#e5e7eb")
    )
    return fig


# =========================
# PIPELINE
# =========================
def process(msg, history, state, mode):
    """Run one full chat turn.

    Steps: guardian input check, nexus risk estimate, perspective fan-out,
    AEGIS ethics scoring, coherence heuristic, synthesis.  Every optional
    stage is best-effort: a failure leaves its default in place.

    Returns (history, state, cleared_textbox, figure, metrics_html, final).
    """
    if not msg.strip():
        return history, state, "", None, "", ""

    history.append({"role": "user", "content": msg})

    # Guardian: input screening (best-effort, non-fatal).
    if HAS_FORGE:
        try:
            guardian.check_input(msg)
        except Exception:
            pass

    # Nexus: pre-corruption risk estimate, defaulting to "low".
    risk = "low"
    if HAS_FORGE:
        try:
            result = nexus.analyze(msg)
            risk = result.get("intent", {}).get("pre_corruption_risk", "low")
        except Exception:
            pass

    # Perspectives: all of them, or the auto-selected subset.
    selected = PERSPECTIVES if mode == "All" else auto_select(msg)
    responses = {p: call_agent(p, msg) for p in selected}

    # AEGIS: mean ethical-alignment score over responses (0.5 fallback).
    eta = 0.5
    if HAS_FORGE:
        try:
            scores = []
            for r in responses.values():
                result = aegis.evaluate(r)
                scores.append(result.get("eta", 0.5))
            if scores:
                eta = float(np.mean(scores))
        except Exception:
            pass

    # Coherence (lightweight heuristic for the demo, capped at 0.99).
    coherence = min(0.99, 0.6 + len(responses) * 0.05)

    # Synthesis
    final = synthesize(responses, msg)
    history.append({"role": "assistant", "content": final})

    fig = build_graph(selected)
    # FIX: the warning sign below was mojibake ("âš ") in the original file.
    metrics_html = f"""
Γ Phase Coherence: {coherence:.4f}
η Ethical Alignment: {eta:.4f}
⚠ Risk: {risk.upper()}
"""
    return history, state, "", fig, metrics_html, final


# =========================
# CSS (CODETTE STYLE)
# =========================
CUSTOM_CSS = """
@import url('https://fonts.googleapis.com/css2?family=Space+Mono:wght@400;700&family=Inter:wght@300;400;600&display=swap');

:root {
    --bg-main: #0b0b17;
    --card-bg: rgba(20,20,40,0.7);
    --purple: #a855f7;
    --cyan: #06b6d4;
    --orange: #f97316;
    --text: #e5e7eb;
}

body {
    background: radial-gradient(circle at top, #14142b, #0b0b17);
    color: var(--text);
    font-family: 'Inter', sans-serif;
}

h1 {
    font-family: 'Space Mono', monospace;
    text-align: center;
    background: linear-gradient(90deg, var(--purple), var(--cyan), var(--orange));
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
}

.metric-box {
    background: var(--card-bg);
    border: 1px solid rgba(168,85,247,0.3);
    padding: 10px;
    border-radius: 10px;
    font-family: 'Space Mono', monospace;
    margin-bottom: 10px;
}

button {
    background: linear-gradient(135deg, var(--purple), var(--cyan)) !important;
    border: none !important;
}
"""


# =========================
# UI
# =========================
def create_app():
    """Build and return the Gradio Blocks app (does not launch it)."""
    # BUG FIX: custom CSS must be passed to gr.Blocks(); Blocks.launch() has
    # no `css` parameter, so the original styling was never applied.
    with gr.Blocks(
        title="Codette-Demo not the actual codette model",
        css=CUSTOM_CSS,
    ) as demo:
        state = gr.State({})
        gr.Markdown("# Codette-Demo not the actual codette model")

        with gr.Row():
            with gr.Column(scale=3):
                # process() appends openai-style {"role", "content"} dicts,
                # which requires the "messages" format in Gradio 4+ —
                # TODO confirm against the installed Gradio version.
                chat = gr.Chatbot(height=520, type="messages")
                msg = gr.Textbox(
                    lines=2,
                    placeholder="Ask Codette something..."
                )
                mode = gr.Radio(["Auto", "All"], value="Auto")
                # FIX: the play glyph was mojibake ("â–¶") in the original.
                send = gr.Button("▶")
            with gr.Column(scale=2):
                metrics = gr.HTML()
                graph = gr.Plot()
                raw = gr.Textbox(label="System Output")

        def run(m, h, s, mode):
            # Thin adapter so both triggers share one handler.
            return process(m, h, s, mode)

        send.click(
            run,
            [msg, chat, state, mode],
            [chat, state, msg, graph, metrics, raw]
        )
        msg.submit(
            run,
            [msg, chat, state, mode],
            [chat, state, msg, graph, metrics, raw]
        )
    return demo


# =========================
# RUN
# =========================
if __name__ == "__main__":
    app = create_app()
    # CSS is applied at Blocks construction time (see create_app); launch()
    # does not accept a css kwarg.
    app.launch(ssr_mode=True)