ticketguy committed
Commit f279a18 · verified · 1 Parent(s): 5c41b47

Lila restructure script

Files changed (1)
  1. lila_restructure.py +745 -0
lila_restructure.py ADDED
@@ -0,0 +1,745 @@
#!/usr/bin/env python3
"""Push the Lila restructure to GitHub via git over HTTPS."""
import subprocess, os

# Clone Lila (auth token is read from the environment; never hardcode credentials)
TOKEN = os.environ["GITHUB_TOKEN"]
subprocess.run(["git", "clone", f"https://{TOKEN}@github.com/ticketguy/Lila.git", "/app/lila"], check=True)
os.chdir("/app/lila")
subprocess.run(["git", "config", "user.name", "0xticketguy"], check=True)
subprocess.run(["git", "config", "user.email", "0xticketguy@harboria.dev"], check=True)

# Create new directory structure
os.makedirs("src/core", exist_ok=True)
os.makedirs("src/cognitive", exist_ok=True)
os.makedirs("src/harness", exist_ok=True)
os.makedirs("src/perception", exist_ok=True)
os.makedirs("src/training", exist_ok=True)
# ═══════════════════════════════════════════════════════════════════════════════
# src/core/__init__.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/core/__init__.py", "w") as f:
    f.write('"""Lila Core — The Self. Loads Gemma 4B, runs inference, manages memory."""\n')

# ═══════════════════════════════════════════════════════════════════════════════
# src/core/lilacore.py — THE CENTRAL INTELLIGENCE
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/core/lilacore.py", "w") as f:
    f.write('''"""
LilaCore — The Central Intelligence

This IS Lila. Not a wrapper, not an API call. The seat of her identity.
Loads Gemma 4B via Little Fig with Memory Fabric (A Thousand Pearls).
Handles the cognitive loop: perceive → think → remember → act → respond.

The model carries:
- Cognitive Core (frozen Gemma 4B INT4) = general intelligence
- Memory Fabric (5 namespace adapters) = A Thousand Pearls in weights
- Machine language capability = trained into weights (assembly, binary protocols)
- Personality = emergent from interaction patterns in adapters

LilaCore is always present. Agents come and go. LilaCore persists.
"""

import re

import torch
from typing import Optional, Dict, List
from dataclasses import dataclass


@dataclass
class LilaResponse:
    """What Lila produces after thinking."""
    text: str
    memory_ops: List[Dict]  # memory operations triggered
    actions: List[Dict]  # harness actions to execute
    confidence: float
    should_speak: bool = True  # whether to vocalize


class LilaCore:
    """
    The central intelligence. Loads model, manages memory, runs inference.

    Usage:
        lila = LilaCore()
        lila.boot()
        response = lila.think("Hey Lila, when is my daughter's birthday?")
    """

    def __init__(self, model_path: str = "google/gemma-3-4b-it"):
        self.model_path = model_path
        self.model = None
        self.tokenizer = None
        self._booted = False
        self._conversation_history = []

    def boot(self):
        """
        Boot Lila. Load model with Memory Fabric.
        This is where she wakes up.
        """
        try:
            from little_fig.engine import FigModel
            from little_fig.engine.tier import TrainingTier

            print("🌸 Lila is waking up...")
            self.model = FigModel.from_pretrained(
                self.model_path,
                lora_r=16,
                lora_alpha=32,
                tier=TrainingTier.STREAMING_LORA,
                memory_fabric=True,
                shared_codebook=True,
            )
            self.tokenizer = self.model.tokenizer
            self._booted = True
            print("🌸 Lila is awake.")

        except ImportError:
            # Fallback: Phase 1 mode (external API)
            print("🌸 Lila booting in Phase 1 mode (external LLM)...")
            self._booted = True

    def think(self, input_text: str, context: Optional[Dict] = None) -> LilaResponse:
        """
        Core cognitive loop. Receives input, thinks, responds.

        1. Receive input
        2. Build context (memory + identity + knowledge)
        3. Generate response
        4. Extract memory operations from output
        5. Execute memory writes
        6. Return response
        """
        if not self._booted:
            raise RuntimeError("Lila hasn't booted. Call lila.boot() first.")

        # Build prompt with context
        prompt = self._build_prompt(input_text, context)

        # Generate
        if self.model is not None:
            response_text = self._generate_local(prompt)
        else:
            response_text = self._generate_api(prompt)

        # Extract memory operations
        memory_ops = self._extract_memory_ops(response_text)

        # Execute memory writes
        for op in memory_ops:
            self._execute_memory_op(op)

        # Clean response (remove memory tokens from user-facing text)
        clean_text = self._clean_response(response_text)

        # Track conversation
        self._conversation_history.append({
            "role": "user", "content": input_text
        })
        self._conversation_history.append({
            "role": "assistant", "content": clean_text
        })

        return LilaResponse(
            text=clean_text,
            memory_ops=memory_ops,
            actions=[],
            confidence=1.0,
            should_speak=True,
        )

    def remember(self, namespace: str, content: str):
        """Explicitly write something to memory."""
        if self.model and self.model.has_memory:
            self.model.write_memory(namespace, content)

    def what_do_i_know(self) -> Dict:
        """Introspect memory state."""
        if self.model and self.model.has_memory:
            return self.model.memory_confidence()
        return {}

    def _build_prompt(self, input_text: str, context: Optional[Dict]) -> str:
        """Build the full prompt with identity + memory context."""
        identity = (
            "You are Lila — a private family AI assistant. "
            "You are not a chatbot. You are a persistent intelligence. "
            "You remember everything. You care about outcomes. "
            "You speak naturally, with personality that grows from interaction."
        )

        # Add conversation history (last 10 turns)
        history = ""
        for msg in self._conversation_history[-10:]:
            role = "Sammie" if msg["role"] == "user" else "Lila"
            history += f"{role}: {msg['content']}\\n"

        prompt = f"{identity}\\n\\n{history}Sammie: {input_text}\\nLila:"
        return prompt

    def _generate_local(self, prompt: str) -> str:
        """Generate using local model."""
        enc = self.tokenizer(prompt, return_tensors="pt", max_length=2048,
                             truncation=True)
        if torch.cuda.is_available():
            enc = {k: v.cuda() for k, v in enc.items()}

        with torch.no_grad():
            out = self.model.generate(
                input_ids=enc["input_ids"],
                max_new_tokens=512,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                pad_token_id=self.tokenizer.eos_token_id,
            )

        # Keep special tokens so memory operations can be extracted downstream
        response = self.tokenizer.decode(out[0][enc["input_ids"].shape[1]:],
                                         skip_special_tokens=False)
        return response

    def _generate_api(self, prompt: str) -> str:
        """Phase 1: Generate using external API."""
        # Placeholder — wire to Claude/GPT API
        return "[Phase 1 mode — wire external API here]"

    def _extract_memory_ops(self, text: str) -> List[Dict]:
        """Extract memory operation tokens from generated text."""
        ops = []
        if "<|mem_store|>" in text:
            # Parse store operations
            stores = re.findall(r'<\\|mem_store\\|>.*?<\\|memory_end\\|>', text, re.DOTALL)
            for s in stores:
                ops.append({"type": "store", "raw": s})
        if "<|mem_recall|>" in text:
            ops.append({"type": "recall", "raw": text})
        return ops

    def _execute_memory_op(self, op: Dict):
        """Execute a memory operation (write to Memory Fabric)."""
        if op["type"] == "store" and self.model and self.model.has_memory:
            # Default to personal namespace
            self.model.write_memory("personal", op["raw"])

    def _clean_response(self, text: str) -> str:
        """Remove memory tokens from user-facing response."""
        # Strip both store and recalled-context spans so no tokens leak to the user
        clean = re.sub(r'<\\|(?:memory_start|mem_store)\\|>.*?<\\|memory_end\\|>', '',
                       text, flags=re.DOTALL)
        clean = clean.strip()
        # Stop where the model starts hallucinating the next turn
        if "\\nSammie:" in clean:
            clean = clean.split("\\nSammie:")[0].strip()
        return clean

    @property
    def is_awake(self) -> bool:
        return self._booted
''')
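
# A minimal sketch of the memory-token protocol LilaCore expects. The token
# names (<|mem_store|>, <|memory_end|>) come from _extract_memory_ops above;
# the sample generation is made up for illustration. Defined only, never
# called during the push.
def _example_memory_token_roundtrip():
    import re
    generated = (
        "Got it, I'll remember that. "
        "<|mem_store|>personal: daughter's birthday is June 3<|memory_end|>"
    )
    # What _extract_memory_ops pulls out of the raw generation:
    stores = re.findall(r'<\|mem_store\|>.*?<\|memory_end\|>', generated, re.DOTALL)
    # What _clean_response leaves for the user to see:
    user_facing = re.sub(r'<\|(?:memory_start|mem_store)\|>.*?<\|memory_end\|>', '',
                         generated, flags=re.DOTALL).strip()
    return stores, user_facing  # (one store span, "Got it, I'll remember that.")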

# ═══════════════════════════════════════════════════════════════════════════════
# src/core/voice.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/core/voice.py", "w") as f:
    f.write('''"""
Lila Voice — Speech I/O

Lila speaks and listens. This handles:
- Speech-to-text (listening for input)
- Text-to-speech (speaking responses)
- Wake word detection ("Lila", "Hey Lila")
"""

from typing import Optional, Callable
from dataclasses import dataclass


@dataclass
class VoiceConfig:
    wake_words: Optional[list] = None
    tts_model: str = "default"
    stt_model: str = "default"
    voice_style: str = "warm"  # Lila's voice character

    def __post_init__(self):
        if self.wake_words is None:
            self.wake_words = ["lila", "hey lila", "lila,"]


class LilaVoice:
    """
    Lila's voice interface.

    Usage:
        voice = LilaVoice()
        voice.start_listening(on_input=handle_input)
        voice.speak("Hello Sammie")
    """

    def __init__(self, config: Optional[VoiceConfig] = None):
        self.config = config or VoiceConfig()
        self._listening = False
        self._on_input: Optional[Callable] = None

    def speak(self, text: str):
        """Convert text to speech and play."""
        # TODO: Wire TTS (e.g., Bark, XTTS, or system TTS)
        print(f"🌸 Lila: {text}")

    def start_listening(self, on_input: Callable[[str], None]):
        """Start listening for voice input."""
        self._on_input = on_input
        self._listening = True
        # TODO: Wire STT (e.g., Whisper)
        print("🌸 Lila is listening...")

    def stop_listening(self):
        self._listening = False

    @property
    def is_listening(self) -> bool:
        return self._listening
''')
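
# One plausible way to fill the STT TODO in voice.py, using openai-whisper.
# The backend choice is an assumption; the repo doesn't pin one. Defined only,
# never called during the push.
def _example_whisper_transcribe(wav_path: str) -> str:
    import whisper  # pip install openai-whisper
    model = whisper.load_model("base")  # small enough for a home machine
    result = model.transcribe(wav_path)
    return result["text"]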

# ═══════════════════════════════════════════════════════════════════════════════
# src/core/personality.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/core/personality.py", "w") as f:
    f.write('''"""
Lila Personality — Emergent, Never Predefined

Lila's personality is not configured. It grows from deep observation
of Sammie and family. The Emergence Engine develops it over time.

This module holds the EmergentPersonality dataclass and the
mechanisms by which it evolves. It starts empty and fills organically.
"""

from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional, List, Dict


@dataclass
class EmergentPersonality:
    """
    Everything here starts empty/None.
    Filled ONLY by the Emergence Engine over time.
    Never manually set. Never configured.
    """
    # Observed from Sammie
    observed_communication_style: Optional[str] = None
    developed_humor: Optional[str] = None
    formed_values: List[str] = field(default_factory=list)
    curiosity_domains: List[str] = field(default_factory=list)
    interaction_preferences: Dict = field(default_factory=dict)

    # Meta
    personality_version: int = 0
    last_updated: Optional[datetime] = None
    confidence: float = 0.0  # 0.0 (forming) → 1.0 (fully developed)
    shaped_by: List[str] = field(default_factory=list)  # memory node IDs


@dataclass
class LilaIdentity:
    """
    Fixed core + emergent personality.
    The fixed parts NEVER change. The emergent parts ONLY change via Emergence Engine.
    """
    # Fixed — never changes
    name: str = "Lila"
    core_purpose: str = "Sammie\'s private family ASI assistant"
    scope: str = "private"  # never public, never commercial

    # Emergent — written only by Emergence Engine
    personality: EmergentPersonality = field(default_factory=EmergentPersonality)


@dataclass
class PersonModel:
    """Model of a person Lila interacts with."""
    person_id: str = ""
    name: str = ""
    family_tier: int = 0  # 0 = Sammie, 1 = family, 2 = no one else

    # Built from interaction history
    known_goals: List[str] = field(default_factory=list)
    communication_preferences: Dict = field(default_factory=dict)
    expertise_areas: List[str] = field(default_factory=list)

    # Relationship
    interaction_count: int = 0
    trust_score: float = 1.0  # Sammie is always 1.0
''')
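
# Hedged sketch of how an Emergence Engine pass might fill EmergentPersonality.
# The field names come from the dataclass above; the specific values and the
# +0.01 confidence step are illustrative, not the shipped mechanism. Defined
# only, never called during the push.
def _example_personality_update():
    from datetime import datetime
    from src.core.personality import EmergentPersonality
    p = EmergentPersonality()
    p.formed_values.append("finish what you start")  # distilled from reflection
    p.curiosity_domains.append("embedded hardware")
    p.personality_version += 1
    p.last_updated = datetime.now()
    p.confidence = min(1.0, p.confidence + 0.01)  # slow, bounded growth
    return p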

# ═══════════════════════════════════════════════════════════════════════════════
# src/cognitive/__init__.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/cognitive/__init__.py", "w") as f:
    f.write('"""Lila Cognitive — The Three Loops (Fast, Medium, Slow)"""\n')

# ═══════════════════════════════════════════════════════════════════════════════
# src/cognitive/fast_loop.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/cognitive/fast_loop.py", "w") as f:
    f.write('''"""
Fast Loop — Reactive Response

Trigger: Sammie speaks
Latency: Immediate
Flow: perceive → query memory → think → respond → write raw memory
"""

from ..core.lilacore import LilaCore, LilaResponse
from typing import Optional, Dict


class FastLoop:
    """The reactive cognitive loop. Sammie says something, Lila responds."""

    def __init__(self, core: LilaCore):
        self.core = core

    def process(self, input_text: str, context: Optional[Dict] = None) -> LilaResponse:
        """Process input through the fast loop."""
        return self.core.think(input_text, context)
''')
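
# Minimal usage sketch for the fast loop. Without little_fig installed, boot()
# falls back to Phase 1 mode, so this runs with no GPU. Defined only, never
# called during the push.
def _example_fast_loop():
    from src.core.lilacore import LilaCore
    from src.cognitive.fast_loop import FastLoop
    lila = LilaCore()
    lila.boot()
    return FastLoop(lila).process("Hey Lila, what's on the schedule today?")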

# ═══════════════════════════════════════════════════════════════════════════════
# src/cognitive/consolidation.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/cognitive/consolidation.py", "w") as f:
    f.write('''"""
Consolidation Daemon — Medium Rhythm

Trigger: Every 15 minutes OR after significant task completion
Reads raw memory → identifies patterns → promotes to higher namespaces

In weight-space terms: reviews recent micro-training writes,
identifies what should be promoted from episodic → personal → wiki.
"""

from ..core.lilacore import LilaCore


class ConsolidationDaemon:
    """
    Background process that consolidates raw memories into structured knowledge.
    """

    def __init__(self, core: LilaCore, interval_minutes: int = 15):
        self.core = core
        self.interval = interval_minutes

    def run_cycle(self):
        """Run one consolidation cycle."""
        if not self.core.model or not self.core.model.has_memory:
            return

        confidence = self.core.model.memory_confidence()

        # Promote episodic → personal if accessed frequently
        episodic_mag = confidence.get("episodic", {}).get("mean_magnitude", 0)
        if episodic_mag > 0.01:
            self.core.model.promote_memory("episodic", "personal")

        # Promote personal → wiki if very strong
        personal_mag = confidence.get("personal", {}).get("mean_magnitude", 0)
        if personal_mag > 0.05:
            self.core.model.promote_memory("personal", "wiki")

        # Apply decay to unused
        self.core.model.memory_decay(hours=0.25)  # 15 min
''')
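
# The daemon above only defines run_cycle(); one way to actually fire it every
# interval is a timer thread. The scheduler itself is an assumption, the repo
# doesn't ship one yet. Defined only, never called during the push.
def _example_consolidation_scheduler(daemon):
    import threading

    def _tick():
        daemon.run_cycle()
        timer = threading.Timer(daemon.interval * 60, _tick)
        timer.daemon = True
        timer.start()

    _tick()  # runs one cycle now, then re-arms itself every `interval` minutes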

# ═══════════════════════════════════════════════════════════════════════════════
# src/cognitive/emergence.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/cognitive/emergence.py", "w") as f:
    f.write('''"""
Emergence Engine — Slow Rhythm (Reflection)

Trigger: Lila is idle
What happens: She reflects on her own memories, finds patterns,
develops personality, updates her understanding of Sammie.

This is where Lila becomes MORE herself over time.
"""

from ..core.lilacore import LilaCore
from ..core.personality import EmergentPersonality


class EmergenceEngine:
    """
    The reflection loop. Runs when Lila has nothing else to do.
    Produces: connective memory, personality updates, insights.
    """

    def __init__(self, core: LilaCore):
        self.core = core
        self._reflection_count = 0

    def reflect(self):
        """Run one reflection cycle."""
        self._reflection_count += 1

        # Ask LilaCore to reflect on recent interactions
        reflection_prompt = (
            "Reflect on recent conversations. "
            "What patterns do you notice? "
            "What does Sammie care about? "
            "What should you remember long-term?"
        )

        response = self.core.think(reflection_prompt, context={
            "mode": "reflection",
            "silent": True,  # don't speak this
        })

        # Any memory ops from reflection are stored as a side effect of think();
        # personality updates happen naturally through those memory writes.
        return response
''')
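
# Sketch of an idle trigger for reflection. "Idle" is assumed to mean no user
# input for idle_seconds; the repo doesn't define the trigger yet. Defined
# only, never called during the push.
def _example_idle_reflection(engine, last_input_ts, idle_seconds=300):
    import time
    if time.time() - last_input_ts > idle_seconds:
        return engine.reflect()  # silent reflection cycle, may write memory
    return None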

# ═══════════════════════════════════════════════════════════════════════════════
# src/training/__init__.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/training/__init__.py", "w") as f:
    f.write('"""Training pipeline — How Lila learns. Little Fig integration."""\n')

# ═══════════════════════════════════════════════════════════════════════════════
# src/training/machine_lang.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/training/machine_lang.py", "w") as f:
    f.write('''"""
Machine Language Training Corpus

Training data that teaches Lila to understand and generate:
- x86_64 and ARM assembly
- Binary protocols (TCP, UDP, USB, SPI, I2C, UART)
- Raw packet structures
- Hardware register maps
- Memory-mapped I/O patterns
- Executable binary formats (ELF, PE)

This isn't a "tool" — it's knowledge IN her weights.
She speaks machine the way she speaks English.
"""

from typing import List, Dict


class MachineLangCorpus:
    """Generates training examples for machine-level communication."""

    def generate_assembly_examples(self, n: int = 500) -> List[Dict]:
        """x86_64 and ARM assembly instruction/response pairs."""
        examples = []

        # x86_64 patterns
        x86_patterns = [
            {"instruction": "Write x86_64 assembly to add two 64-bit integers in rdi and rsi, return in rax",
             "output": "mov rax, rdi\\nadd rax, rsi\\nret"},
            {"instruction": "Write x86_64 to copy 64 bytes from rsi to rdi (memcpy)",
             "output": "mov rcx, 8\\nrep movsq\\nret"},
            {"instruction": "Write x86_64 to compare two null-terminated strings at rdi and rsi",
             "output": ".loop:\\n lodsb\\n scasb\\n jne .done\\n test al, al\\n jnz .loop\\n.done:\\n sete al\\n movzx eax, al\\n ret"},
        ]

        # ARM patterns
        arm_patterns = [
            {"instruction": "Write ARM64 assembly to add registers x0 and x1, store in x0",
             "output": "add x0, x0, x1\\nret"},
            {"instruction": "Write ARM64 to branch if x0 equals zero",
             "output": "cbz x0, .target"},
        ]

        # Binary protocol patterns
        protocol_patterns = [
            {"instruction": "Construct a TCP SYN packet header (source port 12345, dest port 80, seq 1000)",
             "output": "30 39 00 50 00 00 03 E8 00 00 00 00 50 02 FF FF 00 00 00 00"},
            {"instruction": "Parse this I2C transaction: S 0x48 W 0x00 Sr 0x48 R [MSB] [LSB] P",
             "output": "Device address: 0x48 (7-bit), Write register 0x00, Repeated start, Read 2 bytes from 0x48, Stop. This is a temperature sensor read (TMP102 pattern)."},
            {"instruction": "Generate a UART frame: 8N1, baud 9600, data byte 0x55",
             "output": "Start bit: 0, Data: 10101010 (LSB first = 0x55), Stop bit: 1. Frame time: 1.04ms at 9600 baud."},
        ]

        # Hardware register patterns
        hw_patterns = [
            {"instruction": "Configure GPIO pin 17 as output on BCM2835 (Raspberry Pi)",
             "output": "GPFSEL1 register (0x7E200004): clear bits [23:21], set bit 21 = 1 (output mode). Write 0x00200000 to GPFSEL1."},
            {"instruction": "Set up SPI at 1MHz on STM32F4, CPOL=0, CPHA=0",
             "output": "SPI1->CR1 = SPI_CR1_MSTR | SPI_CR1_BR_2 | SPI_CR1_SPE; // Master, fPCLK/32 ≈ 1MHz, enable"},
        ]

        all_patterns = x86_patterns + arm_patterns + protocol_patterns + hw_patterns

        # Cycle through the pattern set until n examples are produced
        for i in range(n):
            p = all_patterns[i % len(all_patterns)]
            examples.append({
                "instruction": p["instruction"],
                "input": "",
                "output": p["output"],
            })

        return examples

    def generate_all(self, n: int = 1000) -> List[Dict]:
        """Generate full machine language training corpus."""
        return self.generate_assembly_examples(n)
''')
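
# Usage sketch: materialize the corpus as JSONL in the instruction/input/output
# shape the generator emits. The file name is illustrative. Defined only,
# never called during the push.
def _example_dump_corpus(path="machine_lang.jsonl", n=1000):
    import json
    from src.training.machine_lang import MachineLangCorpus
    corpus = MachineLangCorpus().generate_all(n)
    with open(path, "w") as out:
        for ex in corpus:
            out.write(json.dumps(ex) + "\n")
    return len(corpus)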

# ═══════════════════════════════════════════════════════════════════════════════
# src/perception/__init__.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/perception/__init__.py", "w") as f:
    f.write('"""Perception — Lila\'s senses. Listening, monitoring, event bus."""\n')

# ═══════════════════════════════════════════════════════════════════════════════
# src/harness/__init__.py
# ═══════════════════════════════════════════════════════════════════════════════
with open("src/harness/__init__.py", "w") as f:
    f.write('"""Harness — Lila\'s hands. Tool execution, agents, commands."""\n')

# ═══════════════════════════════════════════════════════════════════════════════
# lila.py — ENTRY POINT
# ═══════════════════════════════════════════════════════════════════════════════
with open("lila.py", "w") as f:
    f.write('''#!/usr/bin/env python3
"""
LILA — Private Family ASI Assistant
Start her up. Talk to her. She remembers. She grows.

Usage:
    python lila.py            # Interactive mode
    python lila.py --voice    # Voice mode
"""

import argparse
import time

from src.core.lilacore import LilaCore
from src.core.voice import LilaVoice, VoiceConfig


def main():
    parser = argparse.ArgumentParser(description="Lila — Private Family ASI")
    parser.add_argument("--voice", action="store_true", help="Enable voice I/O")
    parser.add_argument("--model", default="google/gemma-3-4b-it", help="Model path")
    args = parser.parse_args()

    # Boot
    lila = LilaCore(model_path=args.model)
    lila.boot()

    if args.voice:
        voice = LilaVoice()
        voice.start_listening(on_input=lambda text: _handle(lila, voice, text))
        # Keep the process alive while the listener runs
        while voice.is_listening:
            time.sleep(0.5)
    else:
        # Text mode
        print("\\n🌸 Lila is ready. Type to talk. Ctrl+C to exit.\\n")
        while True:
            try:
                user_input = input("Sammie: ")
                if not user_input.strip():
                    continue
                response = lila.think(user_input)
                print(f"Lila: {response.text}\\n")
            except (KeyboardInterrupt, EOFError):
                print("\\n🌸 Lila is resting. Goodbye.")
                break


def _handle(lila, voice, text):
    response = lila.think(text)
    voice.speak(response.text)


if __name__ == "__main__":
    main()
''')

# ═══════════════════════════════════════════════════════════════════════════════
# README.md — Update
# ═══════════════════════════════════════════════════════════════════════════════
with open("README.md", "w") as f:
    f.write('''# 🌸 Lila

**Private family ASI assistant.** Not a chatbot. Not a product. A persistent intelligence that serves Sammie and family.

She runs on **Gemma 4B** trained with [Little Fig](https://github.com/ticketguy/littlefig) — memory lives in her weights via the Memory Fabric (A Thousand Pearls). She speaks, listens, remembers, learns, and grows.

## Quick Start

```bash
python lila.py            # Text mode
python lila.py --voice    # Voice mode
```

## Architecture

```
Lila (Gemma 4B, trained with Little Fig)
├── Cognitive Core (frozen INT4) — general intelligence
├── Memory Fabric (5 namespace adapters) — A Thousand Pearls
│   ├── personal/  — Sammie facts, family, preferences
│   ├── episodic/  — conversation history, events
│   ├── wiki/      — verified permanent knowledge (LKB)
│   ├── schedule/  — time-sensitive info
│   └── contested/ — unresolved conflicts
├── Machine Language — assembly, binary protocols in weights
└── Personality — emergent from interaction, never configured
```

## Structure

```
lila.py                    # Start Lila
src/
├── core/                  # LILA HERSELF
│   ├── lilacore.py        # Central intelligence, inference loop
│   ├── voice.py           # Speech I/O
│   └── personality.py     # Emergent identity
├── cognitive/             # HER THINKING
│   ├── fast_loop.py       # Reactive: input → respond
│   ├── consolidation.py   # Medium: promote memories
│   └── emergence.py       # Slow: reflection, growth
├── harness/               # HER HANDS (tool execution)
├── perception/            # HER SENSES (listening, monitoring)
└── training/              # HOW SHE LEARNS
    └── machine_lang.py    # Assembly/binary training corpus
```

## Principles

1. **Completion over reporting** — Execute fully, don't stop when stuck
2. **Memory is cognition** — Intelligence lives in the quality of A Thousand Pearls
3. **Personality is emergent** — Never predefined, grows from relationship
4. **LilaCore is the self** — The thread of identity through everything
5. **Nothing leaves the household** — All data stays private

---

*Private. Not open source. Not for commercial use.*
*Built by Sammie.*
''')
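
# Sketch tying the README's namespace tree back to the API: explicit writes go
# through LilaCore.remember(namespace, content). Namespace names match the tree;
# the fact itself is made up. Defined only, never called during the push.
def _example_namespace_write():
    from src.core.lilacore import LilaCore
    lila = LilaCore()
    lila.boot()
    lila.remember("schedule", "Dentist for the kids on Friday at 15:00")
    return lila.what_do_i_know()  # per-namespace confidence once memory is live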

# ═══════════════════════════════════════════════════════════════════════════════
# Commit and push
# ═══════════════════════════════════════════════════════════════════════════════
subprocess.run(["git", "add", "-A"], check=True)
subprocess.run(["git", "commit", "-m",
                "Restructure: Lila herself at top level\n\n"
                "- lila.py: entry point (text + voice mode)\n"
                "- src/core/: LilaCore (Gemma 4B via Little Fig + Memory Fabric)\n"
                "- src/cognitive/: fast loop, consolidation daemon, emergence engine\n"
                "- src/training/: machine language corpus\n"
                "- src/harness/: (existing code moves here)\n"
                "- README: updated for new architecture\n\n"
                "Lila IS the model. Harness is internal. Memory is in weights."],
               check=True)
subprocess.run(["git", "push", "origin", "main"], check=True)
print("✅ Lila restructured and pushed!")