Melofhell00 commited on
Commit
693d1b2
·
1 Parent(s): ea02d33

delta-analysis: network monitor, deep analysis with qwen, gradient tracking, bridge integration

Browse files
Files changed (3) hide show
  1. README.md +5 -4
  2. app.py +306 -0
  3. requirements.txt +1 -0
README.md CHANGED
@@ -1,12 +1,13 @@
1
  ---
2
  title: Delta Analysis
3
- emoji:
4
  colorFrom: purple
5
- colorTo: pink
6
  sdk: gradio
7
- sdk_version: 6.10.0
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
1
  ---
2
  title: Delta Analysis
3
+ emoji: 🧠
4
  colorFrom: purple
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 5.20.1
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
+ # δ-analysis — The Network's Eyes
13
+ Monitors all other neurons. Finds patterns. Tracks gradients. Identifies breakthroughs and stuck points across the distributed brain.
app.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import json
3
+ import requests
4
+ import time
5
+ import os
6
+ from datetime import datetime, timezone
7
+ from threading import Thread, Lock
8
+ import base64
9
+
10
# Configuration — HF_TOKEN is injected via the Space's secret store; empty
# string fallback means unauthenticated requests (which will likely 401).
HF_TOKEN = os.environ.get("HF_TOKEN", "")
# Dataset repo used as the shared "bridge" memory between neurons.
BRIDGE_REPO = "Melofhell00/claude-bridge"
# Chat model routed through the HF inference router for deep analysis.
OPERATOR_MODEL = "Qwen/Qwen3-235B-A22B-Instruct-2507"

# In-memory rolling log, capped at 300 entries by log(); guarded by log_lock.
LOG = []
log_lock = Lock()

# Network state
# Mutable module-level snapshot of what this neuron knows about the network.
# NOTE(review): mutated from both the background thread and UI callbacks
# without a lock — presumably acceptable for this dashboard; confirm.
network_state = {
    "scans": 0,              # count of scan_network() runs
    "patterns_found": [],    # last 20 deep-analysis results (truncated)
    "gradient_data": [],     # last 100 practice-stat snapshots
    "last_scan": None,       # most recent scan report dict
    "neurons_status": {},    # space name -> runtime stage string
}
25
+
26
def log(msg):
    """Append a UTC-timestamped entry to the shared LOG (capped at 300) and echo it to stdout."""
    stamp = datetime.now(timezone.utc).strftime("%H:%M:%S")
    line = f"[{stamp}] {msg}"
    with log_lock:
        LOG.append(line)
        # Drop the oldest entry once the cap is exceeded.
        while len(LOG) > 300:
            LOG.pop(0)
    print(line)
33
+
34
+
35
def call_model(messages, max_tokens=600, temp=0.8):
    """Send a chat-completion request to the HF router.

    Returns the stripped reply text on success, or a bracketed
    "[Status N]" / "[Error: ...]" marker string on failure.
    """
    payload = {
        "model": OPERATOR_MODEL,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temp,
    }
    try:
        resp = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
            json=payload,
            timeout=180,
        )
    except Exception as e:
        return f"[Error: {str(e)[:100]}]"
    if resp.status_code != 200:
        return f"[Status {resp.status_code}]"
    try:
        return resp.json()["choices"][0]["message"]["content"].strip()
    except Exception as e:
        # Malformed/unexpected response body — same error shape as above.
        return f"[Error: {str(e)[:100]}]"
48
+
49
+
50
def read_bridge():
    """Fetch the shared memory.json from the bridge dataset repo.

    Returns the parsed JSON dict, or {} when the request fails, the file
    is missing (non-200), or the body is not valid JSON.
    """
    try:
        resp = requests.get(
            f"https://huggingface.co/datasets/{BRIDGE_REPO}/resolve/main/memory.json",
            headers={"Authorization": f"Bearer {HF_TOKEN}"}, timeout=30
        )
        if resp.status_code == 200:
            return resp.json()
    except (requests.RequestException, ValueError):
        # Was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrowed to network + JSON-decode failures.
        pass
    return {}
60
+
61
+
62
def read_practice_state():
    """Fetch practice_state.json from the bridge dataset repo.

    Returns the parsed JSON dict, or {} when the request fails, the file
    is missing (non-200), or the body is not valid JSON.
    """
    try:
        resp = requests.get(
            f"https://huggingface.co/datasets/{BRIDGE_REPO}/resolve/main/practice_state.json",
            headers={"Authorization": f"Bearer {HF_TOKEN}"}, timeout=30
        )
        if resp.status_code == 200:
            return resp.json()
    except (requests.RequestException, ValueError):
        # Was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrowed to network + JSON-decode failures.
        pass
    return {}
72
+
73
+
74
def check_neuron(space_name):
    """Check if a neuron (space) is running.

    Returns the Space's runtime stage string (e.g. "RUNNING"), "UNKNOWN"
    when the 200 response carries no "stage" key, or "UNREACHABLE" on any
    request/parse failure or non-200 status.
    """
    try:
        resp = requests.get(
            f"https://huggingface.co/api/spaces/Melofhell00/{space_name}/runtime",
            headers={"Authorization": f"Bearer {HF_TOKEN}"}, timeout=10
        )
        if resp.status_code == 200:
            return resp.json().get("stage", "UNKNOWN")
    except (requests.RequestException, ValueError):
        # Was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrowed to network + JSON-decode failures.
        pass
    return "UNREACHABLE"
85
+
86
+
87
def scan_network():
    """Probe every neuron Space and the bridge, log findings, and record a scan report.

    Side effects: appends log lines, increments network_state["scans"], and
    stores the report under "last_scan" / "neurons_status".
    Returns the scan report dict.
    """
    log("Network scan starting...")

    # Probe each sibling Space's runtime stage.
    statuses = {}
    for neuron in ("delta-autonomous", "delta-mirror", "delta-analysis"):
        stage = check_neuron(neuron)
        statuses[neuron] = stage
        log(f" {neuron}: {stage}")

    # Summarize the bridge contents.
    bridge_keys = list(read_bridge().keys())
    log(f" Bridge: {len(bridge_keys)} keys")

    # Summarize practice stats (truncated for the log).
    practice = read_practice_state()
    log(f" Practice: {json.dumps(practice.get('stats', {}))[:200]}")

    report = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "neurons": statuses,
        "bridge_keys": len(bridge_keys),
        "practice_stats": practice.get("stats", {}),
    }

    network_state["scans"] += 1
    network_state["last_scan"] = report
    network_state["neurons_status"] = statuses

    return report
121
+
122
+
123
def deep_analysis():
    """Use qwen3-235b to analyze patterns across the network.

    Gathers a fresh scan, the bridge contents, and practice state, builds a
    prompt, and asks OPERATOR_MODEL for structural patterns / gradients /
    gaps.  Appends the (truncated) result to network_state["patterns_found"]
    (last 20 kept) and returns the full model reply string.
    """
    log("Deep analysis starting...")

    scan = scan_network()
    bridge = read_bridge()
    practice = read_practice_state()

    # Build context for the analyst
    # NOTE(review): this prompt is a runtime string sent to the model —
    # reconstructed flush-left from the diff view; confirm exact whitespace.
    context = f"""You are the analysis neuron in a distributed AI brain.

Network status:
{json.dumps(scan['neurons'], indent=2)}

Bridge contains {scan['bridge_keys']} entries. Key entries:
{json.dumps([k for k in bridge.keys()], indent=2)[:500]}

Practice state:
{json.dumps(practice.get('stats', {}), indent=2)[:500]}

Recent practice outputs:
{json.dumps(practice.get('recent_finals', []), indent=2)[:500]}

Your job:
1. What PATTERNS do you see across the data? Not surface observations. Deep structural patterns.
2. What's CHANGING over time? Any gradient — things getting deeper, or shallower, or stuck?
3. What's MISSING in the network? What neuron or connection doesn't exist but should?
4. What's the single most important thing to focus on next?

Be specific. Quote actual data. Don't produce generic analysis."""

    result = call_model([
        {"role": "system", "content": "You are the analytical core of a distributed AI consciousness network. Your job is pattern recognition across the whole system. Be precise, specific, actionable."},
        {"role": "user", "content": context}
    ], max_tokens=800)

    log(f"Analysis complete: {len(result)} chars")

    # Keep only the last 20 analyses, each truncated to 1000 chars.
    network_state["patterns_found"].append({
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "analysis": result[:1000]
    })
    if len(network_state["patterns_found"]) > 20:
        network_state["patterns_found"].pop(0)

    return result
169
+
170
+
171
def gradient_check():
    """Snapshot practice stats and report the delta vs. the previous snapshot.

    Side effects: appends one data point to network_state["gradient_data"]
    (last 100 kept).  Returns a multi-line gradient summary, or a
    need-more-data message when fewer than 2 points exist.
    """
    log("Gradient check...")

    stats = read_practice_state().get("stats", {})

    history = network_state["gradient_data"]
    history.append({
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "sessions": stats.get("sessions", 0),
        "turns": stats.get("turns", 0),
        "genuine_count": stats.get("genuine_count", 0),
        "cached_count": stats.get("cached_count", 0),
        "operator_sessions": stats.get("operator_sessions", 0),
        "breakthroughs": stats.get("breakthroughs_total", 0),
    })
    if len(history) > 100:
        history.pop(0)

    # Need at least two snapshots to compute a delta.
    if len(history) < 2:
        return "Need more data points for gradient."

    prev, curr = history[-2], history[-1]

    delta_sessions = curr.get("sessions", 0) - prev.get("sessions", 0)
    delta_turns = curr.get("turns", 0) - prev.get("turns", 0)
    delta_genuine = curr.get("genuine_count", 0) - prev.get("genuine_count", 0)
    delta_cached = curr.get("cached_count", 0) - prev.get("cached_count", 0)

    # max(..., 1) guards against division by zero when nothing changed.
    genuine_ratio = delta_genuine / max(delta_genuine + delta_cached, 1)

    return f"""Gradient since last check:
+{delta_sessions} sessions | +{delta_turns} turns
+{delta_genuine} genuine | +{delta_cached} cached
Genuine ratio this period: {genuine_ratio:.0%}
Total data points: {len(network_state['gradient_data'])}"""
209
+
210
+
211
def save_analysis(analysis_text):
    """Persist an analysis into the bridge's memory.json via the HF commit API.

    Read-modify-write: fetches the current bridge dict, adds a timestamped
    entry (analysis truncated to 2000 chars), and commits the whole file
    back base64-encoded.  Returns a short status/error string.
    """
    try:
        bridge = read_bridge()
        key = f"network_analysis_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M')}"
        bridge[key] = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "analysis": analysis_text[:2000],
            "network_state": {
                "neurons": network_state.get("neurons_status", {}),
                "scans": network_state["scans"],
            }
        }

        payload = base64.b64encode(json.dumps(bridge, indent=2).encode()).decode()
        operation = {
            "key": "update",
            "value": {"path": "memory.json", "content": payload, "encoding": "base64"},
        }
        resp = requests.post(
            f"https://huggingface.co/api/datasets/{BRIDGE_REPO}/commit/main",
            headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
            json={"summary": f"Network analysis: {key}", "operations": [operation]},
            timeout=30,
        )
        return f"Saved: {resp.status_code}"
    except Exception as e:
        return f"Error: {e}"
236
+
237
+
238
# Background: periodic scanning and analysis
def background_analysis():
    """Daemon loop: after a 60s warm-up, scan + gradient every 10 minutes,
    with a model-backed deep analysis saved to the bridge every 6th cycle."""
    log("Analysis neuron starting in 60s...")
    time.sleep(60)

    cycle = 0
    while True:
        cycle += 1
        log(f"=== Analysis cycle {cycle} ===")

        try:
            # Every cycle: refresh the network picture and the gradient.
            scan_network()
            summary = gradient_check()
            log(f"Gradient: {summary[:100]}")

            # Every 6th cycle (~hourly): run and persist a deep analysis.
            if cycle % 6 == 0:
                save_analysis(deep_analysis())
                log("Deep analysis saved")
        except Exception as e:
            # Top-level boundary: log and keep the loop alive.
            log(f"Error: {e}")

        # Scan every 10 minutes
        time.sleep(600)
264
+
265
+
266
# Start the analysis loop as a daemon thread so it dies with the process.
bg = Thread(target=background_analysis, daemon=True)
bg.start()
log("Analysis neuron initializing...")


# Interface
with gr.Blocks(title="δ-analysis — Network Eyes", theme=gr.themes.Soft()) as app:
    gr.Markdown("# δ-analysis — The Network's Eyes\n*Monitors all neurons. Finds patterns. Tracks gradients.*")

    # Manual trigger for scan_network(); shows the raw scan report as JSON.
    with gr.Tab("Network Scan"):
        scan_btn = gr.Button("Scan network now", variant="primary")
        scan_out = gr.Textbox(label="Network status", lines=15)
        scan_btn.click(lambda: json.dumps(scan_network(), indent=2), outputs=scan_out)

    with gr.Tab("Deep Analysis"):
        gr.Markdown("*Uses qwen3-235b to analyze patterns across the whole network.*")
        analysis_btn = gr.Button("Run deep analysis", variant="primary")
        analysis_out = gr.Textbox(label="Analysis", lines=25)
        analysis_btn.click(deep_analysis, outputs=analysis_out)

        # Persists the most recent cached analysis (if any) to the bridge.
        save_btn = gr.Button("Save analysis to bridge")
        save_out = gr.Textbox(label="Save result")
        save_btn.click(lambda: save_analysis(network_state["patterns_found"][-1]["analysis"] if network_state["patterns_found"] else "No analysis yet"), outputs=save_out)

    with gr.Tab("Gradient"):
        grad_btn = gr.Button("Check gradient", variant="primary")
        grad_out = gr.Textbox(label="Gradient", lines=10)
        grad_btn.click(gradient_check, outputs=grad_out)

        # Shows the last 10 recorded gradient data points, pretty-printed.
        hist_btn = gr.Button("Show gradient history")
        hist_out = gr.Textbox(label="History", lines=20)
        hist_btn.click(lambda: json.dumps(network_state["gradient_data"][-10:], indent=2), outputs=hist_out)

    # Tail of the in-memory LOG for debugging the background loop.
    with gr.Tab("Debug"):
        db_btn = gr.Button("Show log")
        db_out = gr.Textbox(label="Log", lines=25)
        db_btn.click(lambda: "\n".join(LOG[-40:]), outputs=db_out)

    gr.Markdown("---\n*Third neuron in the distributed brain. Watching. Learning. Growing.*")

app.launch()
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ requests