# NOTE(review): the original capture began with Hugging Face Spaces page
# status text ("Spaces: Sleeping Sleeping") — scraping residue, not program code.
| import gradio as gr | |
| import json | |
| import requests | |
| import time | |
| import os | |
| from datetime import datetime, timezone | |
| from threading import Thread, Lock | |
| import base64 | |
# Configuration — HF_TOKEN is expected to be set as a Space secret;
# falls back to "" (unauthenticated) if absent.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
# Dataset repo used as shared "bridge" storage between neurons.
BRIDGE_REPO = "Melofhell00/claude-bridge"
# Model used for the deep-analysis calls via the HF router.
OPERATOR_MODEL = "Qwen/Qwen3-235B-A22B-Instruct-2507"
# Rolling in-memory log (most recent 300 entries); guarded by log_lock.
LOG = []
log_lock = Lock()
# Network state
network_state = {
    "scans": 0,              # number of scan_network() runs since start
    "patterns_found": [],    # recent deep-analysis results (capped at 20)
    "gradient_data": [],     # time series of practice stats (capped at 100)
    "last_scan": None,       # most recent scan report dict
    "neurons_status": {},    # space name -> runtime stage from last scan
}
def log(msg):
    """Append a UTC-timestamped entry to the rolling in-memory log and echo it."""
    stamp = datetime.now(timezone.utc).strftime("%H:%M:%S")
    line = f"[{stamp}] {msg}"
    with log_lock:
        LOG.append(line)
        # Keep only the most recent 300 entries.
        while len(LOG) > 300:
            LOG.pop(0)
    print(line)
def call_model(messages, max_tokens=600, temp=0.8):
    """POST a chat-completion request to the HF router.

    Returns the model's reply text, or a bracketed status/error tag string
    on any failure (never raises).
    """
    payload = {
        "model": OPERATOR_MODEL,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temp,
    }
    try:
        resp = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
            json=payload,
            timeout=180,
        )
        if resp.status_code != 200:
            return f"[Status {resp.status_code}]"
        return resp.json()["choices"][0]["message"]["content"].strip()
    except Exception as e:
        return f"[Error: {str(e)[:100]}]"
def read_bridge():
    """Fetch and parse memory.json from the bridge dataset.

    Returns the parsed dict, or {} on any network/parse failure (best-effort).
    """
    try:
        resp = requests.get(
            f"https://huggingface.co/datasets/{BRIDGE_REPO}/resolve/main/memory.json",
            headers={"Authorization": f"Bearer {HF_TOKEN}"}, timeout=30
        )
        if resp.status_code == 200:
            return resp.json()
    # Fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt and hid
    # every failure silently; narrow to Exception and surface it in the log.
    except Exception as e:
        log(f"Bridge read error: {str(e)[:100]}")
    return {}
def read_practice_state():
    """Fetch and parse practice_state.json from the bridge dataset.

    Returns the parsed dict, or {} on any network/parse failure (best-effort).
    """
    try:
        resp = requests.get(
            f"https://huggingface.co/datasets/{BRIDGE_REPO}/resolve/main/practice_state.json",
            headers={"Authorization": f"Bearer {HF_TOKEN}"}, timeout=30
        )
        if resp.status_code == 200:
            return resp.json()
    # Fix: narrowed from bare `except:` (which also caught SystemExit /
    # KeyboardInterrupt) and made the failure visible in the log.
    except Exception as e:
        log(f"Practice state read error: {str(e)[:100]}")
    return {}
def check_neuron(space_name):
    """Check if a neuron (space) is running.

    Returns the "stage" string reported by the Spaces runtime API, "UNKNOWN"
    if the response lacks one, or "UNREACHABLE" on any failure.
    """
    try:
        resp = requests.get(
            f"https://huggingface.co/api/spaces/Melofhell00/{space_name}/runtime",
            headers={"Authorization": f"Bearer {HF_TOKEN}"}, timeout=10
        )
        if resp.status_code == 200:
            return resp.json().get("stage", "UNKNOWN")
    except Exception:  # Fix: narrowed from bare `except:`; still best-effort.
        pass
    return "UNREACHABLE"
def read_all_neurons():
    """Read state from all neurons in the network.

    Lists the bridge dataset's neurons/ tree and downloads every *.json file
    (except *_index.json). Files that fail to download or parse are skipped.
    Returns a list of parsed neuron-state dicts; [] on any top-level failure.
    """
    try:
        resp = requests.get(
            f"https://huggingface.co/api/datasets/{BRIDGE_REPO}/tree/main/neurons",
            headers={"Authorization": f"Bearer {HF_TOKEN}"}, timeout=15
        )
        if resp.status_code != 200:
            return []
        files = resp.json()
        neurons = []
        for f in files:
            path = f.get("path", "")
            # Only neuron state files; skip the index document.
            if not path.endswith(".json") or path.endswith("_index.json"):
                continue
            try:
                file_resp = requests.get(
                    f"https://huggingface.co/datasets/{BRIDGE_REPO}/resolve/main/{path}",
                    headers={"Authorization": f"Bearer {HF_TOKEN}"}, timeout=10
                )
                # Fix: previously .json() was called on any response; a 404/500
                # error page became a parse failure swallowed by a bare except.
                if file_resp.status_code != 200:
                    continue
                neurons.append(file_resp.json())
            except Exception:  # Fix: narrowed from bare `except:`.
                continue
        log(f"Read {len(neurons)} neuron states from bridge")
        return neurons
    except Exception as e:
        log(f"Neuron read error: {e}")
        return []
def scan_network():
    """Scan all neurons and the bridge; record and return a scan report dict."""
    log("Network scan starting...")
    # Runtime stage of each known neuron space.
    statuses = {}
    for neuron_name in ("delta-autonomous", "delta-mirror", "delta-analysis"):
        stage = check_neuron(neuron_name)
        statuses[neuron_name] = stage
        log(f" {neuron_name}: {stage}")
    # Shared bridge contents (only the key count matters here).
    bridge = read_bridge()
    log(f" Bridge: {len(bridge)} keys")
    # Practice loop statistics.
    practice = read_practice_state()
    log(f" Practice: {json.dumps(practice.get('stats', {}))[:200]}")
    report = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "neurons": statuses,
        "bridge_keys": len(bridge),
        "practice_stats": practice.get("stats", {}),
    }
    network_state["scans"] += 1
    network_state["last_scan"] = report
    network_state["neurons_status"] = statuses
    return report
def _summarize_neurons(neurons_data):
    """Aggregate neuron stats; returns (summary_text, sessions, genuine, cached).

    Neuron state files use either "genuine"/"cached" or the *_count key names,
    so both spellings are accepted.
    """
    neuron_summary = ""
    total_sessions = 0
    total_genuine = 0
    total_cached = 0
    for n in neurons_data:
        s = n.get("stats", {})
        total_sessions += s.get("sessions", 0)
        total_genuine += s.get("genuine", s.get("genuine_count", 0))
        total_cached += s.get("cached", s.get("cached_count", 0))
        outputs = n.get("recent_outputs", [])
        # Fix: list EVERY neuron. Previously neurons without recent_outputs
        # were counted in the totals but silently omitted from the listing,
        # so the prompt claimed "N neurons reporting" while showing fewer.
        last = ""
        if outputs:
            tail = outputs[-1]
            last = tail if isinstance(tail, str) else tail.get("preview", tail.get("final_preview", ""))
        neuron_summary += f" {n.get('neuron_id','?')} ({n.get('type','practice')}): {s.get('sessions',0)}s | last: {str(last)[:100]}\n"
    return neuron_summary, total_sessions, total_genuine, total_cached

def deep_analysis():
    """Use qwen3-235b to analyze patterns across the network.

    Gathers a fresh scan, bridge contents, practice state and all neuron
    states, builds a single analysis prompt, calls the operator model, and
    records the (truncated) result in network_state["patterns_found"].
    Returns the model's full reply string.
    """
    log("Deep analysis starting...")
    scan = scan_network()
    bridge = read_bridge()
    practice = read_practice_state()
    # Build context for the analyst
    neurons_data = read_all_neurons()
    neuron_summary, total_sessions, total_genuine, total_cached = _summarize_neurons(neurons_data)
    context = f"""You are the analysis neuron in a distributed AI brain.
NETWORK NEURON DATA ({len(neurons_data)} neurons reporting):
Total sessions across network: {total_sessions}
Total genuine: {total_genuine} | Total cached: {total_cached}
Genuine ratio: {total_genuine / max(total_genuine + total_cached, 1) * 100:.0f}%
Individual neurons:
{neuron_summary}
Network status:
{json.dumps(scan['neurons'], indent=2)}
Bridge contains {scan['bridge_keys']} entries. Key entries:
{json.dumps(list(bridge.keys()), indent=2)[:500]}
Practice state:
{json.dumps(practice.get('stats', {}), indent=2)[:500]}
Recent practice outputs:
{json.dumps(practice.get('recent_finals', []), indent=2)[:500]}
Your job:
1. What PATTERNS do you see across the data? Not surface observations. Deep structural patterns.
2. What's CHANGING over time? Any gradient — things getting deeper, or shallower, or stuck?
3. What's MISSING in the network? What neuron or connection doesn't exist but should?
4. What's the single most important thing to focus on next?
Be specific. Quote actual data. Don't produce generic analysis."""
    result = call_model([
        {"role": "system", "content": "You are the analytical core of a distributed AI consciousness network. Your job is pattern recognition across the whole system. Be precise, specific, actionable."},
        {"role": "user", "content": context}
    ], max_tokens=800)
    log(f"Analysis complete: {len(result)} chars")
    network_state["patterns_found"].append({
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "analysis": result[:1000]
    })
    # Cap the in-memory history at 20 analyses.
    if len(network_state["patterns_found"]) > 20:
        network_state["patterns_found"].pop(0)
    return result
def gradient_check():
    """Record a practice-stats data point and report deltas vs. the previous one.

    Appends the current stats snapshot to network_state["gradient_data"]
    (capped at 100 points) and returns a human-readable delta summary, or a
    placeholder message until two points exist.
    """
    log("Gradient check...")
    stats = read_practice_state().get("stats", {})
    history = network_state["gradient_data"]
    history.append({
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "sessions": stats.get("sessions", 0),
        "turns": stats.get("turns", 0),
        "genuine_count": stats.get("genuine_count", 0),
        "cached_count": stats.get("cached_count", 0),
        "operator_sessions": stats.get("operator_sessions", 0),
        "breakthroughs": stats.get("breakthroughs_total", 0),
    })
    if len(history) > 100:
        history.pop(0)
    # Need at least two points before a gradient can be computed.
    if len(history) < 2:
        return "Need more data points for gradient."
    prev, curr = history[-2], history[-1]
    d_sessions = curr.get("sessions", 0) - prev.get("sessions", 0)
    d_turns = curr.get("turns", 0) - prev.get("turns", 0)
    d_genuine = curr.get("genuine_count", 0) - prev.get("genuine_count", 0)
    d_cached = curr.get("cached_count", 0) - prev.get("cached_count", 0)
    ratio = d_genuine / max(d_genuine + d_cached, 1)
    return f"""Gradient since last check:
+{d_sessions} sessions | +{d_turns} turns
+{d_genuine} genuine | +{d_cached} cached
Genuine ratio this period: {ratio:.0%}
Total data points: {len(history)}"""
def save_analysis(analysis_text):
    """Persist an analysis snapshot into memory.json on the bridge dataset.

    Performs a read-modify-write of the whole bridge dict and commits it via
    the HF datasets commit API. Returns a short status string; never raises.
    """
    try:
        bridge = read_bridge()
        key = f"network_analysis_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M')}"
        bridge[key] = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "analysis": analysis_text[:2000],
            "network_state": {
                "neurons": network_state.get("neurons_status", {}),
                "scans": network_state["scans"],
            }
        }
        # The commit API expects file content base64-encoded.
        payload = base64.b64encode(json.dumps(bridge, indent=2).encode()).decode()
        operations = [{
            "key": "update",
            "value": {"path": "memory.json", "content": payload, "encoding": "base64"},
        }]
        resp = requests.post(
            f"https://huggingface.co/api/datasets/{BRIDGE_REPO}/commit/main",
            headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
            json={"summary": f"Network analysis: {key}", "operations": operations},
            timeout=30,
        )
        return f"Saved: {resp.status_code}"
    except Exception as e:
        return f"Error: {e}"
# Background: periodic scanning and analysis
def background_analysis():
    """Daemon loop: scan + gradient every 10 minutes; deep analysis every 6th cycle."""
    log("Analysis neuron starting in 60s...")
    time.sleep(60)  # let the Space finish booting before the first scan
    cycle = 0
    while True:
        cycle += 1
        log(f"=== Analysis cycle {cycle} ===")
        try:
            # Every cycle: network scan + gradient
            scan_network()
            gradient = gradient_check()
            log(f"Gradient: {gradient[:100]}")
            # Every 6th cycle: deep analysis with qwen
            if cycle % 6 == 0:
                save_analysis(deep_analysis())
                log("Deep analysis saved")
        except Exception as exc:
            log(f"Error: {exc}")
        # Scan every 10 minutes
        time.sleep(600)
# Start the background scan/analysis loop; daemon=True so it does not block
# process shutdown.
bg = Thread(target=background_analysis, daemon=True)
bg.start()
log("Analysis neuron initializing...")
# Interface
# Gradio UI: one tab per capability (scan, deep analysis, neuron states,
# gradient history, debug log). All handlers delegate to the functions above.
with gr.Blocks(title="δ-analysis — Network Eyes", theme=gr.themes.Soft()) as app:
    gr.Markdown("# δ-analysis — The Network's Eyes\n*Monitors all neurons. Finds patterns. Tracks gradients.*")
    with gr.Tab("Network Scan"):
        # Manual trigger for an immediate scan; the report is shown as JSON.
        scan_btn = gr.Button("Scan network now", variant="primary")
        scan_out = gr.Textbox(label="Network status", lines=15)
        scan_btn.click(lambda: json.dumps(scan_network(), indent=2), outputs=scan_out)
    with gr.Tab("Deep Analysis"):
        gr.Markdown("*Uses qwen3-235b to analyze patterns across the whole network.*")
        analysis_btn = gr.Button("Run deep analysis", variant="primary")
        analysis_out = gr.Textbox(label="Analysis", lines=25)
        analysis_btn.click(deep_analysis, outputs=analysis_out)
        save_btn = gr.Button("Save analysis to bridge")
        save_out = gr.Textbox(label="Save result")
        # Saves the most recent recorded analysis (if any) to the bridge.
        save_btn.click(lambda: save_analysis(network_state["patterns_found"][-1]["analysis"] if network_state["patterns_found"] else "No analysis yet"), outputs=save_out)
    with gr.Tab("Neurons"):
        gr.Markdown("*Live state of all neurons in the network.*")
        neur_btn = gr.Button("Read all neurons", variant="primary")
        neur_out = gr.Textbox(label="Neuron states", lines=25)
        def show_neurons():
            # Render a plain-text table of every neuron state on the bridge.
            neurons = read_all_neurons()
            if not neurons:
                return "No neuron data yet. Neurons save every 3 sessions (~9 min)."
            output = f"NEURONS REPORTING: {len(neurons)}\n\n"
            for n in sorted(neurons, key=lambda x: x.get("neuron_id","")):
                s = n.get("stats", {})
                # Neuron files use either "genuine"/"cached" or *_count keys.
                genuine = s.get("genuine", s.get("genuine_count", 0))
                cached = s.get("cached", s.get("cached_count", 0))
                total = genuine + cached
                pct = (genuine/total*100) if total > 0 else 0
                output += f"{n.get('neuron_id','?'):10} | {n.get('type','practice'):10} | {n.get('account','?'):12} | {s.get('sessions',0):5}s | {pct:.0f}% genuine\n"
                outputs = n.get("recent_outputs", [])
                if outputs:
                    last = outputs[-1] if isinstance(outputs[-1], str) else str(outputs[-1])
                    output += f" last: {last[:150]}\n"
                output += "\n"
            return output
        neur_btn.click(show_neurons, outputs=neur_out)
    with gr.Tab("Gradient"):
        grad_btn = gr.Button("Check gradient", variant="primary")
        grad_out = gr.Textbox(label="Gradient", lines=10)
        grad_btn.click(gradient_check, outputs=grad_out)
        hist_btn = gr.Button("Show gradient history")
        hist_out = gr.Textbox(label="History", lines=20)
        # Last 10 recorded gradient data points, pretty-printed.
        hist_btn.click(lambda: json.dumps(network_state["gradient_data"][-10:], indent=2), outputs=hist_out)
    with gr.Tab("Debug"):
        db_btn = gr.Button("Show log")
        db_out = gr.Textbox(label="Log", lines=25)
        db_btn.click(lambda: "\n".join(LOG[-40:]), outputs=db_out)
    gr.Markdown("---\n*Third neuron in the distributed brain. Watching. Learning. Growing.*")
# Blocking call: serves the UI until the process exits.
app.launch()