"""
AETHER v0.3.0 — Autonomous Self-Evolving Architecture (HF Space)
================================================================
Runs continuously in a Docker Space. Background evolution thread persists
memory, knowledge graph, and architecture checkpoints to the HF Hub via
CommitScheduler. Executes tasks via REST API even when browser tabs are closed.
Endpoints:
    GET  /           — Gradio monitoring dashboard
    GET  /status     — Live system state (JSON)
    POST /task       — Submit a reasoning task (async)
    GET  /task/{id}  — Get task result
    POST /evolve     — Trigger one-shot evolution
    GET  /history    — Evolution trajectory
    GET  /kg         — Knowledge graph stats
"""
# --- Imports and module-level persistence configuration ----------------------
import os, sys, time, json, hashlib, copy, random, warnings, asyncio, threading
from pathlib import Path
from dataclasses import dataclass, asdict
from typing import Dict, List, Any, Optional, Tuple, Callable
from collections import deque
from contextlib import asynccontextmanager
import numpy as np
import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastapi import FastAPI, BackgroundTasks
from pydantic import BaseModel
import uvicorn
import gradio as gr
from huggingface_hub import CommitScheduler, HfApi, hf_hub_download

warnings.filterwarnings("ignore")

# Local scratch directory for JSON state snapshots (mirrored to the Hub).
STATE_DIR = Path("/tmp/aether_state")
STATE_DIR.mkdir(parents=True, exist_ok=True)
# Hub credentials / target repo; persistence is disabled when HF_TOKEN is unset.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
STATE_REPO = os.environ.get("AETHER_STATE_REPO", "camdog920/aether-state")
# Module-level singleton, created lazily by init_scheduler().
scheduler = None
def init_scheduler():
    """Lazily create the module-level CommitScheduler.

    No-op when already initialized or when HF_TOKEN is missing (persistence
    disabled). The scheduler commits STATE_DIR to ``state/`` in STATE_REPO
    every 5 minutes.
    """
    global scheduler
    if scheduler is None and HF_TOKEN:
        scheduler = CommitScheduler(
            repo_id=STATE_REPO, repo_type="model", folder_path=STATE_DIR,
            path_in_repo="state", every=5,
        )
        print(f"[PERSISTENCE] CommitScheduler initialized for {STATE_REPO}")
def save_state(state_dict, name="latest"):
    """Write state_dict as pretty-printed JSON to STATE_DIR/<name>.json.

    Non-serializable values are stringified via ``default=str``.
    """
    path = STATE_DIR / f"{name}.json"
    serialized = json.dumps(state_dict, indent=2, default=str)
    with open(path, "w") as f:
        f.write(serialized)
    print(f"[PERSISTENCE] State saved to {path}")
def load_state(name="latest"):
    """Load a persisted state dict, preferring local disk over the Hub.

    Returns the parsed JSON dict, or None when no snapshot exists anywhere.
    """
    path = STATE_DIR / f"{name}.json"
    if path.exists():
        with open(path) as f:
            return json.load(f)
    if HF_TOKEN:
        try:
            # Fall back to the last snapshot committed to the state repo.
            downloaded = hf_hub_download(
                repo_id=STATE_REPO, filename=f"state/{name}.json",
                repo_type="model", local_dir=str(STATE_DIR), token=HF_TOKEN,
            )
            with open(downloaded) as f:
                return json.load(f)
        except Exception as e:
            # Best-effort: a missing remote snapshot is the normal first-boot case.
            print(f"[PERSISTENCE] No remote state found: {e}")
    return None
@dataclass
class AetherConfig:
    """Evolvable hyperparameter bundle for the AETHER system.

    BUG FIX: the original class carried bare annotations but no ``@dataclass``
    decorator, so the keyword constructor used by ``from_vector`` and the
    ``asdict()`` calls elsewhere in this file would fail at runtime.
    """
    population_size: int = 6
    generations: int = 5
    mutation_rate: float = 0.12
    macro_policy_dim: int = 128
    micro_policy_dim: int = 64
    num_agents: int = 4
    working_memory_capacity: int = 16
    episodic_buffer_size: int = 500
    kg_embedding_dim: int = 64
    kg_num_relations: int = 10
    learning_rate: float = 2e-5
    batch_size: int = 4
    enable_self_modification: bool = True
    max_mutation_rate: float = 0.50
    max_agents: int = 16
    max_memory_mb: float = 8192.0
    rollback_fitness_drop: float = 0.15
    risk_threshold: float = 0.70
    archive_dims: Tuple[int, int] = (10, 10)

    def to_vector(self):
        """Project the evolvable hyperparameters into a 7-dim float32 vector.

        learning_rate is scaled by 1e5 so it mutates on a scale comparable
        to the integer-valued genes.
        """
        return np.array([self.population_size, self.mutation_rate, self.learning_rate * 1e5,
                         self.macro_policy_dim, self.micro_policy_dim, self.num_agents,
                         self.kg_embedding_dim], dtype=np.float32)

    @classmethod
    def from_vector(cls, vec):
        """Rebuild a config from ``to_vector`` output, clipping to safe ranges.

        BUG FIX: written as an alternate constructor taking ``cls`` but the
        ``@classmethod`` decorator was missing.
        """
        return cls(
            population_size=int(np.clip(vec[0], 2, 64)),
            mutation_rate=float(np.clip(vec[1], 0.01, 0.5)),
            learning_rate=float(np.clip(vec[2] / 1e5, 1e-6, 1e-3)),
            macro_policy_dim=int(np.clip(vec[3], 64, 512)),
            micro_policy_dim=int(np.clip(vec[4], 32, 256)),
            num_agents=int(np.clip(vec[5], 1, 16)),
            kg_embedding_dim=int(np.clip(vec[6], 32, 512)),
        )
class WorkingMemory:
    """Small attention-weighted FIFO of recent items (CoALA working memory)."""
    def __init__(self, capacity=16):
        # BUG FIX: retrieve() reads self.capacity, which was never stored,
        # so any retrieval on a non-empty buffer raised AttributeError.
        self.capacity = capacity
        self.buffer = deque(maxlen=capacity)
        # Per-slot attention logits; sigmoid-squashed at retrieval time.
        self.attention = nn.Parameter(torch.ones(capacity))
    def store(self, item):
        """Timestamp the item in place and append (oldest entries evicted)."""
        item["_t"] = time.time()
        self.buffer.append(item)
    def retrieve(self, query, top_k=3):
        """Rank items by keyword overlap x slot attention; return the top_k."""
        if not self.buffer:
            return []
        buf = list(self.buffer)
        scores = []
        for i, item in enumerate(buf):
            text = json.dumps(item)
            # Crude lexical match: count query words appearing in the item JSON.
            score = sum(1 for w in query.lower().split() if w in text.lower())
            attn = torch.sigmoid(self.attention[i % self.capacity]).item()
            scores.append(score * attn)
        indices = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:top_k]
        return [buf[i] for i in indices]
    def export(self):
        """Snapshot the buffer contents as a plain list."""
        return list(self.buffer)
class EpisodicMemory:
    """Bounded append-only episode log with crude keyword retrieval."""
    def __init__(self, buffer_size=1000):
        # Oldest episodes are evicted once buffer_size is exceeded.
        self.buffer = deque(maxlen=buffer_size)
    def store(self, episode):
        """Timestamp the episode in place and append it."""
        episode["_t"] = time.time()
        self.buffer.append(episode)
    def retrieve_similar(self, query, top_k=5):
        """Return up to top_k episodes ranked by query-word overlap."""
        episodes = list(self.buffer)
        if not episodes:
            return []
        terms = query.lower().split()
        overlap = [
            sum(1 for t in terms if t in json.dumps(ep).lower())
            for ep in episodes
        ]
        ranked = sorted(range(len(episodes)), key=overlap.__getitem__, reverse=True)
        return [episodes[i] for i in ranked[:top_k]]
    def export(self):
        """Snapshot the buffer as a plain list."""
        return list(self.buffer)
class SemanticMemory:
    """Flat key -> fact store with case-insensitive substring lookup."""
    def __init__(self):
        self.facts = {}
    def store_fact(self, key, value, confidence=1.0):
        """Insert or overwrite a fact record, timestamped at write time."""
        self.facts[key] = {"value": value, "confidence": confidence, "t": time.time()}
    def query(self, query):
        """Return fact records whose key contains the query (case-insensitive)."""
        needle = query.lower()
        matches = []
        for key, record in self.facts.items():
            if needle in key.lower():
                matches.append(record)
        return matches
    def export(self):
        """Expose the raw fact dict."""
        return self.facts
class ProceduralMemory:
    """Registry of named tool snippets with per-tool usage counters."""
    def __init__(self):
        self.tools = {}
        self.usage = {}
    def register_tool(self, name, code, description, tags=None):
        """Store (or overwrite) a tool entry and reset its usage counter."""
        entry = {
            "code": code,
            "description": description,
            "tags": tags or [],
            "registered_at": time.time(),
            "version": 1,
        }
        self.tools[name] = entry
        self.usage[name] = 0
    def search_tools(self, query):
        """Substring-match the query against name, description, and tags."""
        needle = query.lower()
        matches = []
        for name, tool in self.tools.items():
            haystack = f"{name} {tool['description']} {' '.join(tool['tags'])}"
            if needle in haystack.lower():
                matches.append({"name": name, **tool})
        return matches
    def export(self):
        """Dump both the tool table and the usage counters."""
        return {"tools": self.tools, "usage": self.usage}
class CoALAMemory:
    """Facade unifying the four CoALA-style stores: working, episodic,
    semantic, and procedural memory."""
    def __init__(self, capacity=16):
        self.working = WorkingMemory(capacity=capacity)
        # NOTE(review): episodic size is hard-coded to 1000 even though
        # AetherConfig carries episodic_buffer_size — confirm intended.
        self.episodic = EpisodicMemory(buffer_size=1000)
        self.semantic = SemanticMemory()
        self.procedural = ProceduralMemory()
    def store(self, item, memory_type="working"):
        """Route an item to one store.

        Semantic expects a dict whose key/value pairs each become a fact;
        procedural expects at least {"name", "code"}. Unknown memory_type
        values are silently ignored.
        """
        if memory_type == "working":
            self.working.store(item)
        elif memory_type == "episodic":
            self.episodic.store(item)
        elif memory_type == "semantic":
            # Each key/value pair becomes an individual fact.
            for k, v in item.items():
                self.semantic.store_fact(k, v)
        elif memory_type == "procedural":
            # Dropped silently unless both required fields are present.
            if "name" in item and "code" in item:
                self.procedural.register_tool(item["name"], item["code"],
                                              item.get("description", ""), item.get("tags", []))
    def retrieve(self, query, memory_type="all", top_k=5):
        """Query one store, or merge working + episodic + semantic for "all".

        The merged list is truncated to top_k, so working-memory hits
        (added first) take precedence over episodic and semantic ones.
        """
        if memory_type == "all":
            out = self.working.retrieve(query, top_k=top_k // 2)
            out += self.episodic.retrieve_similar(query, top_k=top_k)
            out += self.semantic.query(query)[:top_k]
            return out[:top_k]
        elif memory_type == "working":
            return self.working.retrieve(query, top_k)
        elif memory_type == "episodic":
            return self.episodic.retrieve_similar(query, top_k)
        elif memory_type == "semantic":
            return self.semantic.query(query)[:top_k]
        elif memory_type == "procedural":
            return self.procedural.search_tools(query)
        return []
    def export(self):
        """JSON-friendly dump of all four stores."""
        return {"working": self.working.export(), "episodic": self.episodic.export(),
                "semantic": self.semantic.export(), "procedural": self.procedural.export()}
class TemporalMemory(nn.Module):
    """Time-indexed event buffer with exponential-recency retrieval."""
    def __init__(self, buffer_size=1000, hidden_dim=64):
        super().__init__()
        self.buffer = deque(maxlen=buffer_size)
        # Learnable gate network; not consulted by retrieve_context() itself.
        self.temporal_gate = nn.Sequential(
            nn.Linear(2, hidden_dim), nn.ReLU(),
            nn.Linear(hidden_dim, 1), nn.Sigmoid(),
        )
    def store(self, event):
        """Timestamp the event in place and append it."""
        event["_t"] = time.time()
        self.buffer.append(event)
    def retrieve_context(self, current_time=None, lookback=3600.0):
        """Return events within `lookback` seconds, newest-recency first.

        Each returned dict is augmented with `recency` (exp(-age/lookback))
        and `age` in seconds.
        """
        now = current_time or time.time()
        window = []
        for event in self.buffer:
            age = now - event.get("_t", now)
            if age > lookback:
                continue
            recency = torch.exp(torch.tensor(-age / lookback)).item()
            window.append({**event, "recency": recency, "age": age})
        return sorted(window, key=lambda entry: entry["recency"], reverse=True)
    def export(self):
        """Snapshot the buffer as a plain list."""
        return list(self.buffer)
class RGCNLayer(nn.Module):
    """Relational graph-convolution layer with basis decomposition.

    Per-relation weight matrices are composed as linear combinations of
    ``num_bases`` shared basis matrices, keeping parameter count independent
    of the relation count.
    """
    def __init__(self, in_dim, out_dim, num_relations, num_bases=4):
        super().__init__()
        self.num_relations = num_relations
        # Shared basis matrices and per-relation mixing coefficients.
        self.bases = nn.Parameter(torch.Tensor(num_bases, in_dim, out_dim))
        self.comp = nn.Parameter(torch.Tensor(num_relations, num_bases))
        self.self_loop = nn.Parameter(torch.Tensor(in_dim, out_dim))
        self.bias = nn.Parameter(torch.Tensor(out_dim))
        nn.init.xavier_uniform_(self.bases)
        nn.init.xavier_uniform_(self.comp)
        nn.init.xavier_uniform_(self.self_loop)
        nn.init.zeros_(self.bias)
    def forward(self, x, edge_index, edge_type):
        """Propagate messages along typed edges.

        Args:
            x: (num_nodes, in_dim) node features, or None to fall back to
               identity (one-hot) features.
            edge_index: (2, num_edges) tensor of [source; target] node ids.
            edge_type: (num_edges,) relation id per edge.
        Returns:
            (num_nodes, out_dim) updated node features.
        """
        num_nodes = int(edge_index.max().item()) + 1 if x is None else x.size(0)
        if x is None:
            # One-hot fallback, truncated/padded to in_dim columns.
            x = torch.eye(num_nodes, self.bases.size(1), device=edge_index.device)
        # Materialize the (num_relations, in_dim, out_dim) weight stack.
        weight = torch.einsum("rb,bio->rio", self.comp, self.bases)
        out = torch.zeros(num_nodes, weight.size(2), device=x.device)
        for rid in range(self.num_relations):
            mask = edge_type == rid
            if mask.sum() == 0:
                continue
            ei = edge_index[:, mask]
            # Transform source-node features, scatter-add into target rows.
            messages = torch.mm(x[ei[0]], weight[rid])
            out.index_add_(0, ei[1], messages)
        # Self-loop transform plus bias, added for every node.
        out = out + torch.mm(x, self.self_loop) + self.bias
        return out
class KnowledgeGraphEncoder(nn.Module):
    """Stack of RGCN layers over a learned node-embedding table."""
    def __init__(self, num_nodes, hidden_dim, num_relations, num_layers=2, num_bases=4):
        super().__init__()
        self.node_embeddings = nn.Embedding(num_nodes, hidden_dim)
        self.layers = nn.ModuleList([
            RGCNLayer(hidden_dim, hidden_dim, num_relations, num_bases) for _ in range(num_layers)
        ])
        # One LayerNorm per RGCN layer, applied before the ReLU.
        self.norms = nn.ModuleList([nn.LayerNorm(hidden_dim) for _ in range(num_layers)])
    def forward(self, edge_index, edge_type):
        """Encode every node referenced by edge_index.

        NOTE(review): num_nodes is derived from the max id appearing in
        edge_index, so isolated nodes with higher ids receive no embedding
        here — confirm that is intended.
        """
        num_nodes = int(edge_index.max().item()) + 1
        x = self.node_embeddings(torch.arange(num_nodes, device=edge_index.device))
        for layer, norm in zip(self.layers, self.norms):
            x = F.relu(norm(layer(x, edge_index, edge_type)))
        return x
class ComplExScorer(nn.Module):
    """ComplEx-style complex-bilinear scorer for (head, relation, tail) triples.

    NOTE(review): head and tail use separate embedding tables here, whereas
    canonical ComplEx shares one entity table — confirm this is deliberate.
    """
    def __init__(self, num_nodes, num_relations, hidden_dim=50):
        super().__init__()
        # Real and imaginary parts of head/tail/relation embeddings.
        self.head_real = nn.Embedding(num_nodes, hidden_dim)
        self.head_imag = nn.Embedding(num_nodes, hidden_dim)
        self.tail_real = nn.Embedding(num_nodes, hidden_dim)
        self.tail_imag = nn.Embedding(num_nodes, hidden_dim)
        self.rel_real = nn.Embedding(num_relations, hidden_dim)
        self.rel_imag = nn.Embedding(num_relations, hidden_dim)
        for p in self.parameters():
            nn.init.xavier_uniform_(p)
    def forward(self, h, r, t):
        """Return a real-valued plausibility score per triple (higher = more plausible)."""
        hr, hi = self.head_real(h), self.head_imag(h)
        tr, ti = self.tail_real(t), self.tail_imag(t)
        rr, ri = self.rel_real(r), self.rel_imag(r)
        # Re(<h, r, conj(t)>) expanded into its four real-valued terms.
        return torch.sum(hr * rr * tr + hr * ri * ti + hi * rr * ti - hi * ri * tr, dim=-1)
    def loss(self, h, r, t, neg_t=None):
        """Logistic loss over positive triples and (sampled) negative tails.

        When neg_t is omitted, one uniformly random tail is sampled per positive.
        """
        pos = self.forward(h, r, t)
        if neg_t is None:
            neg_t = torch.randint(0, self.tail_real.num_embeddings, t.size(), device=t.device)
        neg = self.forward(h, r, neg_t)
        return (F.softplus(-pos) + F.softplus(neg)).mean()
class KnowledgeGraphEngine(nn.Module):
    """Hybrid symbolic + learned knowledge graph.

    Facts live in a networkx DiGraph keyed by integer node ids; neural
    components (RGCN encoder, ComplEx scorer) are created lazily once the
    first fact arrives. ``query()`` fuses symbolic and learned answers via a
    per-relation learned gate.

    Fixes vs. the original:
      * ``from_export`` is now a real ``@classmethod`` (it takes ``cls`` but
        had no decorator, so ``KnowledgeGraphEngine.from_export(data)`` failed).
      * ``from_export`` restores the integer relation id on each edge.
        ``export`` writes the relation *name* into the edge record, but
        ``reason_symbolic``/``_check_fact`` compare ``data["relation"]``
        against integer ids, so a save/load round trip silently broke
        symbolic reasoning.
    """
    def __init__(self, embedding_dim=128, num_relations=20, max_nodes=10000):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.num_relations = num_relations
        self.max_nodes = max_nodes
        self.graph = nx.DiGraph()
        self.node_id_map = {}    # entity name -> int node id
        self.relation_map = {}   # relation name -> int relation id
        self.next_node_id = 0
        self.next_rel_id = 0
        self.encoder = None      # built lazily by _ensure_capacity()
        self.scorer = None
        # Per-relation gate blending symbolic vs. learned answers in query().
        self.symbolic_attention = nn.Parameter(torch.ones(num_relations))
        self.rules = []          # list of (premise, conclusion) triples

    def _get_or_create_node(self, name):
        """Return the node id for ``name``, allocating one on first sight."""
        if name not in self.node_id_map:
            self.node_id_map[name] = self.next_node_id
            self.graph.add_node(self.next_node_id, name=name)
            self.next_node_id += 1
        return self.node_id_map[name]

    def _get_or_create_relation(self, name):
        """Return the relation id for ``name``, allocating one on first sight."""
        if name not in self.relation_map:
            self.relation_map[name] = self.next_rel_id
            self.next_rel_id += 1
        return self.relation_map[name]

    def add_fact(self, head, relation, tail, confidence=1.0):
        """Insert (head, relation, tail) as a directed edge with confidence."""
        h = self._get_or_create_node(head)
        t = self._get_or_create_node(tail)
        r = self._get_or_create_relation(relation)
        self.graph.add_edge(h, t, relation=r, name=relation, confidence=confidence)
        self._ensure_capacity()

    def add_rule(self, premise, conclusion):
        """Register a one-step inference rule (premise triple -> conclusion triple)."""
        self.rules.append((premise, conclusion))

    def _ensure_capacity(self):
        """Lazily build encoder/scorer on first use.

        NOTE: capacity is fixed at first-build size; nodes added later are
        outside the scorer's embedding tables until a rebuild.
        """
        if self.encoder is None and self.next_node_id > 0:
            n = min(self.next_node_id, self.max_nodes)
            r = max(self.next_rel_id, self.num_relations)
            self.encoder = KnowledgeGraphEncoder(n, self.embedding_dim, r)
            self.scorer = ComplExScorer(n, r, self.embedding_dim // 2)

    def _check_fact(self, fact):
        """Return True iff the (head, relation, tail) triple exists in the graph."""
        h, r, t = fact
        if h not in self.node_id_map or t not in self.node_id_map or r not in self.relation_map:
            return False
        h_id, t_id, r_id = self.node_id_map[h], self.node_id_map[t], self.relation_map[r]
        return self.graph.has_edge(h_id, t_id) and self.graph.edges[h_id, t_id].get("relation") == r_id

    def reason_symbolic(self, query_head, query_relation):
        """Answer a query via direct edges, rule firing, and 2-hop paths.

        Returns result dicts sorted by confidence descending; multi-hop
        answers carry geometrically decayed confidence (0.6 per hop).
        """
        results = []
        if query_head not in self.node_id_map:
            return results
        h_id = self.node_id_map[query_head]
        r_name = query_relation
        # 1) Direct edges matching the requested relation.
        if r_name in self.relation_map:
            r_id = self.relation_map[r_name]
            for _, target, data in self.graph.out_edges(h_id, data=True):
                if data.get("relation") == r_id:
                    results.append({
                        "head": query_head, "relation": r_name,
                        "tail": self.graph.nodes[target].get("name", str(target)),
                        "confidence": data.get("confidence", 1.0), "path": "direct",
                    })
        # 2) Rules whose premise head matches and whose premise holds.
        for premise, conclusion in self.rules:
            p_head, _, _ = premise
            c_head, c_rel, c_tail = conclusion
            if p_head == query_head and self._check_fact(premise):
                results.append({
                    "head": c_head if c_head != "?" else query_head,
                    "relation": c_rel, "tail": c_tail,
                    "confidence": 0.8, "path": "inferred",
                    "rule": f"{premise} -> {conclusion}",
                })
        # 3) Multi-hop (<= 2 edges) reachability, confidence-decayed per hop.
        for neighbor in nx.bfs_tree(self.graph, h_id, depth_limit=2).nodes():
            if neighbor != h_id:
                for path in nx.all_simple_paths(self.graph, h_id, neighbor, cutoff=2):
                    if len(path) > 1:
                        ed = self.graph.edges[path[0], path[1]]
                        results.append({
                            "head": query_head,
                            "relation": f"multi-hop via {ed.get('name', 'unknown')}",
                            "tail": self.graph.nodes[neighbor].get("name", str(neighbor)),
                            "confidence": 0.6 ** (len(path) - 1),
                            "path": "->".join(str(n) for n in path),
                        })
        return sorted(results, key=lambda x: x.get("confidence", 0), reverse=True)

    def reason_learned(self, query_head, query_relation, top_k=5):
        """Score every known tail with the ComplEx scorer; return the top_k."""
        if self.scorer is None or query_head not in self.node_id_map:
            return []
        h_id = self.node_id_map[query_head]
        r_id = self.relation_map.get(query_relation)
        if r_id is None:
            return []
        h_t = torch.tensor([h_id])
        r_t = torch.tensor([r_id])
        all_t = torch.arange(self.scorer.tail_real.num_embeddings)
        scores = []
        # Batch candidate tails to bound per-call memory.
        for i in range(0, len(all_t), 1000):
            batch = all_t[i:i + 1000]
            scores.extend(self.scorer(h_t.repeat(len(batch)), r_t.repeat(len(batch)), batch).tolist())
        scores_t = torch.tensor(scores)
        top_scores, top_idx = torch.topk(scores_t, min(top_k, len(scores_t)))
        results = []
        for idx, sc in zip(top_idx, top_scores):
            node_name = self.graph.nodes[idx.item()].get("name", str(idx.item()))
            results.append({
                "head": query_head, "relation": query_relation,
                "tail": node_name, "confidence": torch.sigmoid(sc).item(), "path": "learned",
            })
        return results

    def query(self, text_query, top_k=5):
        """Parse '<head> <relation words...>' and fuse symbolic + learned answers."""
        parts = text_query.lower().split()
        head = parts[0].capitalize() if parts else text_query.capitalize()
        relation = " ".join(parts[1:]) if len(parts) > 1 else "related_to"
        sym = self.reason_symbolic(head, relation)[:top_k]
        learned = self.reason_learned(head, relation, top_k)
        # Learned per-relation gate decides the symbolic/learned blend.
        rel_id = self.relation_map.get(relation, 0)
        sym_w = torch.sigmoid(self.symbolic_attention[rel_id % self.num_relations]).item()
        learned_w = 1.0 - sym_w
        for r in sym:
            r["source"] = "symbolic"
            r["fusion_weight"] = sym_w
        for r in learned:
            r["source"] = "learned"
            r["fusion_weight"] = learned_w
        all_r = sorted(sym + learned, key=lambda x: x.get("confidence", 0), reverse=True)
        return {
            "query": text_query, "results": all_r[:top_k],
            "symbolic_weight": sym_w, "learned_weight": learned_w,
            "num_symbolic": len(sym), "num_learned": len(learned),
        }

    def stats(self):
        """Basic size counters for dashboards and the /kg endpoint."""
        return {"num_nodes": self.graph.number_of_nodes(),
                "num_edges": self.graph.number_of_edges(),
                "num_relations": len(self.relation_map), "num_rules": len(self.rules)}

    def export(self):
        """JSON-friendly snapshot; edge 'relation' fields hold relation *names*."""
        edges = []
        for u, v, d in self.graph.edges(data=True):
            edges.append({"source": u, "target": v, "relation": d.get("name"), "confidence": d.get("confidence")})
        return {
            "nodes": {n: self.graph.nodes[n].get("name", str(n)) for n in self.graph.nodes()},
            "edges": edges, "rules": self.rules,
            "node_id_map": self.node_id_map, "relation_map": self.relation_map,
            "next_node_id": self.next_node_id, "next_rel_id": self.next_rel_id,
        }

    @classmethod
    def from_export(cls, data, embedding_dim=64, num_relations=10):
        """Rebuild an engine from an ``export()`` snapshot.

        BUG FIX: decorated with @classmethod (it takes ``cls``), and edge
        relation *names* are mapped back to their integer ids so that
        reason_symbolic() keeps working after restore.
        """
        kg = cls(embedding_dim=embedding_dim, num_relations=num_relations)
        kg.node_id_map = data.get("node_id_map", {})
        kg.relation_map = data.get("relation_map", {})
        kg.next_node_id = data.get("next_node_id", 0)
        kg.next_rel_id = data.get("next_rel_id", 0)
        kg.rules = [tuple(r) for r in data.get("rules", [])]
        for n, name in data.get("nodes", {}).items():
            kg.graph.add_node(int(n), name=name)
        for e in data.get("edges", []):
            rel_name = e.get("relation")
            # Restore the integer id; fall back to the name for unknown relations.
            rel_id = kg.relation_map.get(rel_name, rel_name)
            kg.graph.add_edge(int(e["source"]), int(e["target"]),
                              relation=rel_id, name=rel_name, confidence=e.get("confidence", 1.0))
        kg._ensure_capacity()
        return kg
class AgentRole:
    """String constants naming the four specialist agent roles."""
    RESEARCHER = "researcher"
    ENGINEER = "engineer"
    ANALYZER = "analyzer"
    INTEGRATOR = "integrator"
class BaseAgent(nn.Module):
    """Single role-tagged agent: embed -> LSTM -> policy/value heads,
    plus a templated text ``act`` interface."""
    def __init__(self, role, hidden_dim=128, vocab_size=32000):
        super().__init__()
        self.role = role
        self.hidden_dim = hidden_dim
        # Sequential indices matter: [0] embedding, [1] LSTM (see forward()).
        self.encoder = nn.Sequential(
            nn.Embedding(vocab_size, hidden_dim),
            nn.LSTM(hidden_dim, hidden_dim, batch_first=True),
        )
        self.policy_head = nn.Linear(hidden_dim, hidden_dim)
        self.value_head = nn.Linear(hidden_dim, 1)
        self.task_history = deque(maxlen=100)
        self.performance_log = []
    def forward(self, input_ids):
        """Encode a token batch; return policy logits, value, and final hidden state."""
        embedded = self.encoder[0](input_ids)
        sequence, _ = self.encoder[1](embedded)
        final_state = sequence[:, -1, :]
        return {
            "policy_logits": self.policy_head(final_state),
            "value": self.value_head(final_state),
            "hidden": final_state,
        }
    def act(self, observation):
        """Log the observation and return a role-specific canned response."""
        self.task_history.append({"observation": observation, "t": time.time()})
        snippet = observation[:50]
        role_actions = {
            AgentRole.RESEARCHER: f"[RESEARCHER] Exploring knowledge for: '{snippet}...'",
            AgentRole.ENGINEER: f"[ENGINEER] Synthesizing tool for: '{snippet}...'",
            AgentRole.ANALYZER: f"[ANALYZER] Evaluating solution for: '{snippet}...'",
            AgentRole.INTEGRATOR: f"[INTEGRATOR] Merging components for: '{snippet}...'",
        }
        return role_actions.get(self.role, f"[{self.role.upper()}] Processing: '{observation}'")
    def update(self, reward):
        """Record a scalar reward for this agent."""
        self.performance_log.append(reward)
class HierarchicalAgent(nn.Module):
    """Two-level policy: a macro LSTM drafts a subgoal blueprint and a micro
    LSTM emits low-level action logits for the active subgoal."""
    def __init__(self, macro_dim=256, micro_dim=128, num_subgoals=5):
        super().__init__()
        self.macro_dim = macro_dim
        self.micro_dim = micro_dim
        self.num_subgoals = num_subgoals
        self.macro_decoder = nn.LSTM(macro_dim, macro_dim, batch_first=True)
        self.subgoal_head = nn.Linear(macro_dim, num_subgoals)
        # Learned "stop decoding" embedding compared against decoder output.
        self.termination_token = nn.Parameter(torch.randn(macro_dim))
        self.micro_encoder = nn.LSTM(micro_dim + macro_dim, micro_dim, batch_first=True)
        self.action_head = nn.Linear(micro_dim, 50)  # 50 = fixed action-space size
        self.current_blueprint = None
        self.active_subgoal_idx = 0
    def generate_blueprint(self, task_embedding):
        """Greedily decode up to num_subgoals subgoal labels from the task embedding."""
        batch_size = task_embedding.size(0)
        hidden = (torch.zeros(1, batch_size, self.macro_dim), torch.zeros(1, batch_size, self.macro_dim))
        input_tok = task_embedding.unsqueeze(1)
        blueprints = []
        for _ in range(self.num_subgoals):
            out, hidden = self.macro_decoder(input_tok, hidden)
            sg_logits = self.subgoal_head(out.squeeze(1))
            sg_id = torch.argmax(sg_logits, dim=-1)
            # Stop early once the decoder output aligns with the termination token.
            sim = torch.cosine_similarity(out.squeeze(1), self.termination_token.unsqueeze(0))
            if sim.item() > 0.9:
                break
            blueprints.append(f"subgoal_{sg_id.item()}")
            input_tok = out  # autoregressive: feed decoder output back in
        self.current_blueprint = blueprints
        self.active_subgoal_idx = 0
        return blueprints
    def execute_action(self, observation, blueprint=None):
        """Return (1, 50) action logits, or zeros when no blueprint exists."""
        if blueprint is not None:
            self.current_blueprint = blueprint
        if not self.current_blueprint:
            return torch.zeros(1, 50)
        # NOTE(review): `active` is computed but never used below — the micro
        # policy conditions on a fresh random embedding rather than the real
        # subgoal. Confirm whether this is an intentional placeholder.
        active = self.current_blueprint[min(self.active_subgoal_idx, len(self.current_blueprint) - 1)]
        subgoal_embed = torch.randn(1, self.macro_dim)
        combined = torch.cat([observation, subgoal_embed], dim=-1)
        out, _ = self.micro_encoder(combined.unsqueeze(1))
        return self.action_head(out.squeeze(1))
    def advance_subgoal(self):
        """Move to the next subgoal in the current blueprint."""
        self.active_subgoal_idx += 1
    def reset(self):
        """Clear blueprint state between tasks."""
        self.current_blueprint = None
        self.active_subgoal_idx = 0
class AetherAgentOrchestrator(nn.Module):
    """Routes each task through four specialist agents plus a leader that
    synthesizes their outputs, guided by a hierarchical blueprint."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.agents = nn.ModuleDict({
            "researcher": BaseAgent(AgentRole.RESEARCHER, hidden_dim=config.macro_policy_dim),
            "engineer": BaseAgent(AgentRole.ENGINEER, hidden_dim=config.micro_policy_dim),
            "analyzer": BaseAgent(AgentRole.ANALYZER, hidden_dim=config.micro_policy_dim),
            "integrator": BaseAgent(AgentRole.INTEGRATOR, hidden_dim=config.micro_policy_dim),
        })
        self.leader = BaseAgent("leader", hidden_dim=config.macro_policy_dim)
        self.hierarchical = HierarchicalAgent(macro_dim=config.macro_policy_dim, micro_dim=config.micro_policy_dim)
        # Learnable per-agent routing logits, softmaxed into a distribution.
        self.routing_weights = nn.Parameter(torch.ones(len(self.agents)))
        self.aggregation_gate = nn.Softmax(dim=0)
        self.interactions = []   # full per-task trace (unbounded — grows forever)
        self.task_count = 0
    def forward(self, task, context):
        """Run one task through blueprint -> routed agents -> leader synthesis.

        NOTE(review): the task embedding is torch.randn, i.e. a random
        placeholder — the blueprint does not actually depend on the task text.
        """
        task_embed = torch.randn(1, self.config.macro_policy_dim)
        blueprint = self.hierarchical.generate_blueprint(task_embed)
        routing_probs = self.aggregation_gate(self.routing_weights)
        agent_outputs = {}
        for i, (name, agent) in enumerate(self.agents.items()):
            weight = routing_probs[i].item()
            # Agents routed below 10% probability are skipped entirely.
            if weight < 0.10:
                continue
            sub_task = blueprint[min(i, len(blueprint) - 1)] if blueprint else task
            output = agent.act(f"[{name}] {sub_task}")
            agent_outputs[name] = {"output": output, "weight": weight, "sub_task": sub_task}
        synthesis = self.leader.act(f"Synthesize: {task} with inputs: {list(agent_outputs.keys())}")
        self.interactions.append({
            "task": task, "blueprint": blueprint,
            "agent_outputs": agent_outputs, "leader_synthesis": synthesis,
            "routing_probs": routing_probs.detach().cpu().tolist(),
            "t": time.time(),
        })
        self.task_count += 1
        return {"output": synthesis, "blueprint": blueprint,
                "agent_outputs": agent_outputs,
                "routing_weights": routing_probs.detach().cpu().tolist()}
    def execute(self, task, kg_context, context):
        """Alias for forward(); kg_context is currently ignored."""
        return self.forward(task, context)
    def stats(self):
        """Counters and current routing logits for monitoring."""
        return {"total_tasks": self.task_count, "num_agents": len(self.agents),
                "total_interactions": len(self.interactions),
                "routing_weights": self.routing_weights.detach().cpu().tolist()}
class AutoOversight:
    """Automated safety gate for self-modification: scores risk, checks
    stability bounds, runs a regression suite, and tracks rollback state."""
    def __init__(self, config):
        self.config = config
        self.audit_log = []                      # one entry per decide() attempt
        self.baseline_fitness = 0.0              # best regression score seen
        self.last_good_config = None             # checkpoint for rollback
        self.last_good_fitness = -float("inf")
        self.consecutive_rejections = 0
    def risk_score(self, candidate):
        """Mean of four normalized risk factors in [0, 1].

        Factors: mutation rate vs. cap, agent count vs. cap, a rough
        parameter-memory estimate vs. cap, and an inverted-dims flag
        (micro dim exceeding macro dim scores 1.0).
        """
        risks = []
        risks.append(min(1.0, candidate.mutation_rate / self.config.max_mutation_rate))
        risks.append(min(1.0, candidate.num_agents / self.config.max_agents))
        # Crude memory estimate: macro*micro*agents float32 params, in MB.
        est_mem = (candidate.macro_policy_dim * candidate.micro_policy_dim * candidate.num_agents * 4) / 1e6
        risks.append(min(1.0, est_mem / self.config.max_memory_mb))
        risks.append(1.0 if candidate.micro_policy_dim > candidate.macro_policy_dim else 0.0)
        return float(np.mean(risks))
    def validate_stability(self, candidate):
        """Check each hyperparameter against its allowed range.

        Returns (ok, reason); reason joins all violations when not ok.
        """
        checks = {"population_size": (2, 64), "mutation_rate": (0.0, self.config.max_mutation_rate),
                  "learning_rate": (1e-6, 1e-3), "num_agents": (1, self.config.max_agents),
                  "macro_policy_dim": (32, 512), "micro_policy_dim": (16, 256)}
        violations = []
        for field_name, (lo, hi) in checks.items():
            val = getattr(candidate, field_name, None)
            if val is not None and not (lo <= val <= hi):
                violations.append(f"{field_name}={val} not in [{lo},{hi}]")
        if candidate.micro_policy_dim > candidate.macro_policy_dim:
            violations.append("micro > macro")
        return (False, "; ".join(violations)) if violations else (True, "ok")
    def regression_suite(self, candidate, core):
        """Smoke-test a candidate config by building key subsystems.

        Exercises WorkingMemory, KnowledgeGraphEngine, and the orchestrator's
        blueprint generation; any exception fails the suite outright.
        Returns (passed, composite_score in [0, 1]).
        NOTE(review): ``core`` is accepted but never used here.
        """
        scores = []
        try:
            wm = WorkingMemory(capacity=candidate.working_memory_capacity)
            for i in range(100):
                wm.store({"idx": i, "data": torch.randn(16)})
            retrieved = wm.retrieve("idx", top_k=5)
            scores.append(len(retrieved) / 5.0)
            kg = KnowledgeGraphEngine(embedding_dim=candidate.kg_embedding_dim, num_relations=candidate.kg_num_relations)
            for i in range(20):
                kg.add_fact(f"Node{i}", "relates_to", f"Node{i+1}")
            q = kg.query("Node0 relates_to", top_k=3)
            scores.append(min(1.0, len(q["results"]) / 3.0))
            orch = AetherAgentOrchestrator(candidate)
            task_embed = torch.randn(1, candidate.macro_policy_dim)
            blueprint = orch.hierarchical.generate_blueprint(task_embed)
            scores.append(min(1.0, len(blueprint) / 3.0))
        except Exception:
            # Deliberately broad: any construction/runtime failure means the
            # candidate cannot be trusted.
            return False, 0.0
        composite = float(np.mean(scores))
        # Reject if the composite regresses too far below the running baseline.
        if self.baseline_fitness > 0 and composite < self.baseline_fitness * (1 - self.config.rollback_fitness_drop):
            return False, composite
        return True, composite
    def should_rollback(self, current_fitness):
        """True when fitness dropped more than rollback_fitness_drop
        (relative) below the last good checkpoint."""
        if self.last_good_fitness == -float("inf"):
            return False
        drop = (self.last_good_fitness - current_fitness) / (abs(self.last_good_fitness) + 1e-8)
        return drop > self.config.rollback_fitness_drop
    def decide(self, candidate, core):
        """Full gate: risk -> stability -> regression. Returns (approved, score, reason)."""
        risk = self.risk_score(candidate)
        if risk > self.config.risk_threshold:
            self._log(candidate, False, f"risk={risk:.2f} > threshold")
            self.consecutive_rejections += 1
            return False, risk, "auto-rejected: high risk"
        stable, reason = self.validate_stability(candidate)
        if not stable:
            self._log(candidate, False, reason)
            self.consecutive_rejections += 1
            return False, risk, f"auto-rejected: unstable ({reason})"
        reg_pass, reg_score = self.regression_suite(candidate, core)
        if not reg_pass:
            self._log(candidate, False, f"regression fail score={reg_score:.3f}")
            self.consecutive_rejections += 1
            return False, reg_score, "auto-rejected: regression failure"
        self._log(candidate, True, f"risk={risk:.2f} reg={reg_score:.3f}")
        self.consecutive_rejections = 0
        self.baseline_fitness = max(self.baseline_fitness, reg_score)
        return True, reg_score, "auto-approved"
    def _log(self, candidate, approved, reason):
        """Append an audit entry; candidate must be a dataclass (asdict)."""
        self.audit_log.append({"timestamp": time.time(), "approved": approved,
                               "hash": hashlib.sha256(json.dumps(asdict(candidate), sort_keys=True).encode()).hexdigest()[:16],
                               "reason": reason})
    def update_good_checkpoint(self, config, fitness):
        """Record a known-good config/fitness pair for future rollback."""
        self.last_good_config = copy.deepcopy(config)
        self.last_good_fitness = fitness
    def summary(self):
        """Aggregate audit statistics for dashboards."""
        total = len(self.audit_log)
        approved = sum(1 for m in self.audit_log if m["approved"])
        return {"total_attempted": total, "approved": approved, "rejected": total - approved,
                "consecutive_rejections": self.consecutive_rejections,
                "baseline_fitness": self.baseline_fitness, "last_good_fitness": self.last_good_fitness}
class MAPelitesArchive:
    """MAP-Elites grid: one elite (config, fitness) per behavior cell."""
    def __init__(self, dims=(10, 10), ranges=None):
        self.dims = dims
        self.ranges = ranges or [(0, 1), (0, 1)]
        self.archive = {}  # cell tuple -> (config, fitness)
    def _index(self, measures):
        """Map behavior measures to a discrete grid cell."""
        cell = []
        for value, (lo, hi), bins in zip(measures, self.ranges, self.dims):
            fraction = (value - lo) / (hi - lo + 1e-8)
            cell.append(int(np.clip(fraction * bins, 0, bins - 1)))
        return tuple(cell)
    def add(self, config, fitness, measures):
        """Insert the candidate if its cell is empty or it beats the incumbent."""
        cell = self._index(measures)
        incumbent = self.archive.get(cell)
        if incumbent is None or incumbent[1] < fitness:
            self.archive[cell] = (config, fitness)
            return True
        return False
    def sample(self, n=1):
        """Return up to n elite configs drawn uniformly without replacement."""
        if not self.archive:
            return []
        elites = list(self.archive.values())
        return [cfg for cfg, _ in random.sample(elites, min(n, len(elites)))]
    def get_best(self):
        """Return the single highest-fitness (config, fitness), or None if empty."""
        if not self.archive:
            return None
        return max(self.archive.values(), key=lambda entry: entry[1])
    def stats(self):
        """Coverage, QD-score (summed elite fitness), and max fitness."""
        total_cells = self.dims[0] * self.dims[1]
        fitnesses = [f for _, f in self.archive.values()]
        return {"coverage": len(self.archive) / total_cells,
                "qd_score": sum(fitnesses),
                "max_fitness": max(fitnesses, default=0)}
class AetherEvolutionEngine:
    """Quality-diversity evolution loop over AetherConfig candidates."""
    def __init__(self, config):
        self.config = config
        self.archive = MAPelitesArchive(dims=config.archive_dims, ranges=[(0, 1), (0, 1)])
        self.generation = 0
    def generate_candidates(self, base_config, population_size=8):
        """Return base config + fresh mutants + up to two archive elites."""
        candidates = [base_config]
        # BUG FIX: MAPelitesArchive defines no __len__, so len(self.archive)
        # raised TypeError. Count occupied cells via the underlying dict.
        archive_seeds = self.archive.sample(n=min(2, len(self.archive.archive)))
        for _ in range(population_size - len(archive_seeds) - 1):
            candidates.append(self._mutate(base_config))
        candidates.extend(archive_seeds)
        return candidates
    def _mutate(self, config):
        """Gaussian-perturb the config's vector form; copy non-evolved fields."""
        vec = config.to_vector()
        noise = np.random.normal(0, config.mutation_rate, size=vec.shape)
        mutated = vec + noise * vec  # multiplicative noise: scales with gene magnitude
        new_cfg = AetherConfig.from_vector(mutated)
        # from_vector() only restores the 7 evolvable genes; carry over the rest.
        new_cfg.generations = config.generations
        new_cfg.enable_self_modification = config.enable_self_modification
        new_cfg.archive_dims = config.archive_dims
        return new_cfg
    def select(self, candidates, fitness_scores):
        """Keep the top half ranked by fitness x sqrt(novelty).

        Novelty is the mean distance to the k nearest neighbors in config
        vector space (k <= 4).
        """
        if not candidates or not fitness_scores:
            return candidates[:2] if len(candidates) >= 2 else candidates
        # BUG FIX: with a single candidate the k-NN slice below is empty and
        # np.mean([]) yields NaN; a lone candidate is simply kept.
        if len(candidates) == 1:
            return list(candidates)
        vectors = np.array([c.to_vector() for c in candidates])
        f = np.array(fitness_scores)
        f_norm = (f - f.min()) / (f.max() - f.min() + 1e-8)
        k = min(4, len(candidates) - 1)
        novelties = []
        for i, v in enumerate(vectors):
            dists = np.linalg.norm(vectors - v, axis=1)
            dists[i] = np.inf  # exclude self from nearest neighbors
            knn = np.partition(dists, k)[:k]
            novelties.append(np.mean(knn))
        nov_norm = np.array(novelties) / (max(novelties) + 1e-8)
        scores = f_norm * np.sqrt(nov_norm + 1e-8)
        n_select = max(1, len(candidates) // 2)
        top_indices = np.argsort(scores)[-n_select:]
        return [candidates[i] for i in top_indices]
    def mutate(self, candidates):
        """Mutate each candidate, clamping dims so micro never exceeds macro."""
        mutated = []
        for cfg in candidates:
            new_cfg = self._mutate(cfg)
            if new_cfg.macro_policy_dim > 512:
                new_cfg.macro_policy_dim = 512
            if new_cfg.micro_policy_dim > new_cfg.macro_policy_dim:
                new_cfg.micro_policy_dim = new_cfg.macro_policy_dim // 2
            mutated.append(new_cfg)
        return mutated
    def update_archive(self, candidates, fitness_scores):
        """Insert evaluated candidates into the MAP-Elites grid.

        Behavior measures: normalized agent count and clipped fitness.
        Candidates whose evaluation failed (-inf) are skipped.
        """
        for cfg, fitness in zip(candidates, fitness_scores):
            if fitness == -float("inf"):
                continue
            sym_proxy = cfg.num_agents / cfg.max_agents
            measures = np.array([sym_proxy, np.clip(fitness, 0, 1)])
            self.archive.add(cfg, fitness, measures)
    def get_diversity_stats(self):
        """Expose the archive's coverage / QD-score stats."""
        return self.archive.stats()
class AetherCore(nn.Module):
    """Top-level AETHER system: neuro-symbolic executor + evolution loop.

    Subsystems (memory, knowledge graph, agents, oversight, ...) are built
    lazily on first access via properties, so a fresh core is cheap to
    construct and state restoration can inject subsystems directly into the
    backing ``_name`` slots. The accessors MUST be properties: ``forward``
    and ``evolve`` use them as attributes (``self.knowledge.query(...)``).
    """
    def __init__(self, config=None):
        super().__init__()
        self.config = config or AetherConfig()
        self.generation = 0
        self.architecture_history = []  # one entry per accepted architecture
        self.fitness_log = []           # best fitness recorded per generation
        self.metadata = {"birth": time.time(), "version": "0.3.0-space"}
        # Backing slots for the lazily-created subsystems (see properties).
        self._memory = None
        self._temporal = None
        self._evolution = None
        self._agents = None
        self._knowledge = None
        self._oversight = None
        # Learnable gates blending symbolic vs neural contributions.
        self.symbolic_gate = nn.Parameter(torch.tensor(0.0))
        self.neural_gate = nn.Parameter(torch.tensor(0.0))

    @property
    def memory(self):
        """Lazily-built working memory (CoALA-style)."""
        if self._memory is None:
            self._memory = CoALAMemory(capacity=self.config.working_memory_capacity)
        return self._memory

    @property
    def temporal(self):
        """Lazily-built episodic/temporal memory buffer."""
        if self._temporal is None:
            self._temporal = TemporalMemory(buffer_size=self.config.episodic_buffer_size)
        return self._temporal

    @property
    def evolution(self):
        """Lazily-built evolution engine bound to the current config."""
        if self._evolution is None:
            self._evolution = AetherEvolutionEngine(self.config)
        return self._evolution

    @property
    def agents(self):
        """Lazily-built multi-agent orchestrator."""
        if self._agents is None:
            self._agents = AetherAgentOrchestrator(self.config)
        return self._agents

    @property
    def knowledge(self):
        """Lazily-built knowledge graph engine."""
        if self._knowledge is None:
            self._knowledge = KnowledgeGraphEngine(
                embedding_dim=self.config.kg_embedding_dim,
                num_relations=self.config.kg_num_relations,
            )
        return self._knowledge

    @property
    def oversight(self):
        """Lazily-built safety/oversight gatekeeper for evolution."""
        if self._oversight is None:
            self._oversight = AutoOversight(self.config)
        return self._oversight

    def forward(self, task, context=None):
        """Execute one task: KG lookup, agent run, gated symbolic/neural blend.

        Returns a dict with the agent output, normalized gate weights, the
        KG context used, and the current generation.
        """
        context = context or {}
        kg_context = self.knowledge.query(task, top_k=5)
        self.memory.store({"task": task, "kg_context": kg_context, "t": time.time()})
        result = self.agents.execute(task, kg_context, context)
        # Normalize the two sigmoid gates so the weights sum to 1.
        sym_w = torch.sigmoid(self.symbolic_gate)
        neu_w = torch.sigmoid(self.neural_gate)
        total = sym_w + neu_w + 1e-8
        sym_w, neu_w = sym_w / total, neu_w / total
        self.temporal.store({
            "task": task, "result": result,
            "weights": {"symbolic": sym_w.item(), "neural": neu_w.item()},
        })
        return {"output": result, "symbolic_weight": sym_w.item(),
                "neural_weight": neu_w.item(), "kg_context": kg_context,
                "generation": self.generation}

    def _default_evaluator(self, candidate):
        """Smoke-test a candidate config across subsystems; -inf on failure.

        Scores blueprint generation, KG build/query, working-memory retrieval
        and a dimension-balance heuristic, averaged into [0, 1].
        """
        scores = []
        try:
            orch = AetherAgentOrchestrator(candidate)
            task_embed = torch.randn(1, candidate.macro_policy_dim)
            blueprint = orch.hierarchical.generate_blueprint(task_embed)
            scores.append(min(1.0, len(blueprint) / 4.0))
            kg = KnowledgeGraphEngine(embedding_dim=candidate.kg_embedding_dim, num_relations=candidate.kg_num_relations)
            for i in range(15):
                kg.add_fact(f"Entity{i}", "connects_to", f"Entity{i+1}")
            q = kg.query("Entity0 connects_to", top_k=5)
            scores.append(min(1.0, len(q["results"]) / 3.0))
            mem = WorkingMemory(capacity=candidate.working_memory_capacity)
            for i in range(50):
                mem.store({"idx": i, "data": list(range(10))})
            retrieved = mem.retrieve("idx", top_k=5)
            scores.append(min(1.0, len(retrieved) / 5.0))
            balance = 1.0 - abs(candidate.macro_policy_dim - 256) / 256.0
            scores.append(max(0.0, balance))
        except Exception:
            # Broad catch is deliberate: any crash means the candidate is unfit.
            return -float("inf")
        return float(np.mean(scores))

    def evolve(self, num_generations=None, evaluator=None):
        """Run the generate -> oversee -> evaluate -> select -> mutate loop.

        Updates ``self.config`` whenever a validated mutant beats the best
        fitness so far; supports oversight-triggered rollback to the last
        known-good config.
        """
        num_generations = num_generations or self.config.generations
        evaluator = evaluator or self._default_evaluator
        best_fitness = -float("inf")
        best_config = None
        for gen in range(num_generations):
            self.generation = gen
            candidates = self.evolution.generate_candidates(self.config, self.config.population_size)
            fitness_scores = []
            for candidate in candidates:
                approved, score, reason = self.oversight.decide(candidate, self)
                if approved:
                    fitness_scores.append(evaluator(candidate))
                else:
                    fitness_scores.append(-float("inf"))  # rejected: never archived
            current_best = max((f for f in fitness_scores if f > -float("inf")), default=-float("inf"))
            if self.oversight.should_rollback(current_best):
                if self.oversight.last_good_config is not None:
                    self.config = copy.deepcopy(self.oversight.last_good_config)
                continue  # skip selection/mutation for this generation
            selected = self.evolution.select(candidates, fitness_scores)
            mutated = self.evolution.mutate(selected)
            # Second oversight pass on the mutants before adoption.
            validated = []
            validated_scores = []
            for m in mutated:
                ok, _, _ = self.oversight.decide(m, self)
                if ok:
                    validated.append(m)
                    validated_scores.append(evaluator(m))
            if validated and validated_scores:
                best_idx = int(np.argmax(validated_scores))
                best_mutated = validated[best_idx]
                current_fitness = validated_scores[best_idx]
                if current_fitness > best_fitness:
                    best_fitness = current_fitness
                    best_config = best_mutated
                    self.config = best_mutated
                    self.oversight.update_good_checkpoint(best_mutated, best_fitness)
                    # Short content hash identifies this architecture in logs.
                    arch_hash = hashlib.sha256(json.dumps(asdict(best_mutated), sort_keys=True).encode()).hexdigest()[:16]
                    self.architecture_history.append({
                        "generation": gen, "hash": arch_hash,
                        "fitness": best_fitness, "config": asdict(best_mutated),
                    })
            self.evolution.update_archive(candidates, fitness_scores)
            self.fitness_log.append(best_fitness)
        return {"best_fitness": best_fitness, "best_config": asdict(best_config) if best_config else None,
                "generations": num_generations, "history": self.architecture_history,
                "oversight_summary": self.oversight.summary(),
                "archive_stats": self.evolution.get_diversity_stats()}

    def self_reflect(self):
        """Summarize recent fitness trend and gate balance with recommendations."""
        recs = []
        if len(self.fitness_log) > 5:
            recent = self.fitness_log[-5:]
            if max(recent) - min(recent) < 0.01:
                recs.append("Fitness plateau detected.")
            if recent[-1] < recent[0]:
                recs.append("Declining trend. Rollback or expand search.")
        sym = torch.sigmoid(self.symbolic_gate).item()
        if sym < 0.3:
            recs.append("Symbolic reasoning underutilized.")
        elif sym > 0.7:
            recs.append("Symbolic dominance. Increase neural flexibility.")
        return {"generation": self.generation,
                "architectures_tested": len(self.architecture_history),
                "fitness_trend": self.fitness_log,
                "neuro_symbolic_balance": {"symbolic": sym, "neural": 1.0 - sym},
                "recommendations": recs,
                "oversight": self.oversight.summary()}

    def export_state(self):
        """Serialize core + subsystems to a JSON-safe dict (tensors -> lists)."""
        return {"config": asdict(self.config), "generation": self.generation,
                "architecture_history": self.architecture_history,
                "fitness_log": self.fitness_log, "metadata": self.metadata,
                "knowledge": self.knowledge.export(),
                "memory": self.memory.export(),
                "model_state_dict": {k: v.cpu().tolist() for k, v in self.state_dict().items()}}

    @classmethod
    def from_state(cls, state):
        """Alternate constructor: rebuild a core from ``export_state`` output.

        Must be a classmethod — callers invoke ``AetherCore.from_state(saved)``.
        Injects the restored knowledge graph directly into the lazy slot.
        """
        cfg = AetherConfig(**state["config"])
        core = cls(config=cfg)
        core.generation = state["generation"]
        core.architecture_history = state["architecture_history"]
        core.fitness_log = state["fitness_log"]
        core.metadata = state["metadata"]
        core._knowledge = KnowledgeGraphEngine.from_export(
            state.get("knowledge", {}),
            embedding_dim=cfg.kg_embedding_dim,
            num_relations=cfg.kg_num_relations,
        )
        return core
# --- Module-level runtime state shared by the API, UI and worker thread ---
aether_core = None  # the live AetherCore instance; set during lifespan startup
stop_event = threading.Event()  # signals the background evolution thread to exit
task_results = {}  # task_id -> result payload, polled via GET /task/{id}
task_counter = 0  # monotonically increasing component of generated task ids
def background_evolution():
    """Daemon loop: run one evolution generation at a time, checkpointing
    state to disk every few generations and logging notable milestones to
    the knowledge graph. Exits when ``stop_event`` is set."""
    global aether_core
    gens_since_checkpoint = 0
    checkpoint_interval = 3  # persist state after this many generations
    while not stop_event.is_set():
        try:
            if aether_core is not None:
                print("[EVOLUTION] Running generation batch...")
                outcome = aether_core.evolve(num_generations=1)
                gens_since_checkpoint += 1
                if gens_since_checkpoint >= checkpoint_interval:
                    save_state(aether_core.export_state(), name="latest")
                    gens_since_checkpoint = 0
                if outcome["best_fitness"] > 0.9:
                    # High-fitness generations get recorded as KG facts.
                    aether_core.knowledge.add_fact(
                        f"Gen_{aether_core.generation}", "achieved", f"fitness_{outcome['best_fitness']:.4f}"
                    )
            time.sleep(30)
        except Exception as e:
            print(f"[EVOLUTION] Error: {e}")
            time.sleep(10)  # brief back-off before retrying after a failure
def seed_knowledge(core):
    """Bootstrap a fresh core's knowledge graph with axiomatic facts."""
    for head, relation, tail in (
        ("Intelligence", "requires", "Reasoning"),
        ("Reasoning", "requires", "Memory"),
        ("Memory", "enables", "Learning"),
        ("Learning", "produces", "Intelligence"),
        ("Agent", "has_role", "Researcher"),
        ("Agent", "has_role", "Engineer"),
        ("Agent", "has_role", "Analyzer"),
        ("Agent", "has_role", "Integrator"),
    ):
        core.knowledge.add_fact(head, relation, tail)
@asynccontextmanager
async def lifespan(app):
    """FastAPI lifespan: restore state, start the evolution thread, save on exit.

    Decorated with ``asynccontextmanager`` (imported at the top of the file)
    so FastAPI receives the async context manager factory its ``lifespan=``
    parameter expects; without the decorator the app would get a bare async
    generator function and startup would fail.
    """
    global aether_core
    print("[STARTUP] Restoring AETHER state...")
    saved = load_state("latest")
    if saved:
        try:
            aether_core = AetherCore.from_state(saved)
            print("[STARTUP] State restored from Hub")
        except Exception as e:
            # A corrupt or incompatible checkpoint must not block startup.
            print(f"[STARTUP] Restore failed: {e}, initializing fresh")
            aether_core = AetherCore(AetherConfig())
            seed_knowledge(aether_core)
    else:
        aether_core = AetherCore(AetherConfig())
        seed_knowledge(aether_core)
        print("[STARTUP] Fresh AETHER initialized")
    init_scheduler()
    thread = threading.Thread(target=background_evolution, daemon=True)
    thread.start()
    print("[STARTUP] Background evolution thread started")
    yield  # application serves requests here
    print("[SHUTDOWN] Stopping evolution thread...")
    stop_event.set()
    thread.join(timeout=5)
    if aether_core:
        save_state(aether_core.export_state(), name="latest")
    print("[SHUTDOWN] State saved, exiting")
# ASGI application; the lifespan handler above manages state restore/persist
# and the background evolution thread.
app = FastAPI(title="AETHER Autonomous API", lifespan=lifespan)
class TaskRequest(BaseModel):
    """Request body for task submission."""
    task: str
    # Optional free-form execution context forwarded to AetherCore.forward.
    context: Optional[Dict[str, Any]] = {}
class ConfigUpdate(BaseModel):
    """Partial config patch; fields left as None are not changed."""
    mutation_rate: Optional[float] = None
    population_size: Optional[int] = None
    num_agents: Optional[int] = None
@app.get("/status")  # route per the module docstring: GET /status
async def get_status():
    """Return live system state: generation, fitness trail, KG/agent stats."""
    if aether_core is None:
        return {"status": "initializing"}
    ref = aether_core.self_reflect()
    return {
        "status": "running",
        "generation": aether_core.generation,
        "best_fitness": aether_core.fitness_log[-1] if aether_core.fitness_log else None,
        "fitness_history": aether_core.fitness_log,
        "architecture_changes": len(aether_core.architecture_history),
        "kg_stats": aether_core.knowledge.stats(),
        "agent_stats": aether_core.agents.stats(),
        "reflection": ref,
    }
@app.post("/task")  # route per the module docstring: POST /task
async def submit_task(req: TaskRequest, background: BackgroundTasks):
    """Queue a reasoning task; poll GET /task/{task_id} for the result."""
    global task_counter
    task_id = f"task_{task_counter}_{int(time.time())}"
    task_counter += 1

    def execute_task(tid, task, ctx):
        # Runs after the response is sent; stores the outcome for polling.
        try:
            result = aether_core.forward(task, ctx)
            task_results[tid] = {"status": "complete", "result": result, "timestamp": time.time()}
        except Exception as e:
            task_results[tid] = {"status": "error", "error": str(e), "timestamp": time.time()}

    background.add_task(execute_task, task_id, req.task, req.context)
    return {"task_id": task_id, "status": "queued"}
@app.get("/task/{task_id}")  # route per the module docstring: GET /task/{id}
async def get_task(task_id: str):
    """Fetch a previously submitted task's result, or a not_found marker."""
    return task_results.get(task_id, {"status": "not_found"})
@app.post("/evolve")  # route per the module docstring: POST /evolve
async def trigger_evolve():
    """Run one evolution generation synchronously, then checkpoint state."""
    if aether_core is None:
        return {"status": "error", "message": "AETHER not initialized"}
    result = aether_core.evolve(num_generations=1)
    save_state(aether_core.export_state(), name="latest")
    return {"status": "evolved", "result": result}
@app.get("/history")  # route per the module docstring: GET /history
async def get_history():
    """Return the evolution trajectory (accepted-architecture log)."""
    if aether_core is None:
        return {"history": []}
    return {"history": aether_core.architecture_history}
@app.get("/kg")  # route per the module docstring: GET /kg
async def get_kg():
    """Export the full knowledge graph."""
    if aether_core is None:
        return {"kg": {}}
    return {"kg": aether_core.knowledge.export()}
# NOTE(review): this route is not listed in the module docstring; "/kg/fact"
# is chosen to sit under the existing /kg namespace — confirm intended path.
@app.post("/kg/fact")
async def add_kg_fact(head: str, relation: str, tail: str, confidence: float = 1.0):
    """Insert a single (head, relation, tail) fact into the knowledge graph."""
    if aether_core is None:
        return {"status": "error"}
    aether_core.knowledge.add_fact(head, relation, tail, confidence)
    return {"status": "added", "kg_stats": aether_core.knowledge.stats()}
# NOTE(review): route not listed in the module docstring; "/config" assumed.
@app.post("/config")
async def update_config(update: ConfigUpdate):
    """Patch selected config fields at runtime and checkpoint the new state."""
    if aether_core is None:
        return {"status": "error"}
    if update.mutation_rate is not None:
        aether_core.config.mutation_rate = update.mutation_rate
    if update.population_size is not None:
        aether_core.config.population_size = update.population_size
    if update.num_agents is not None:
        aether_core.config.num_agents = update.num_agents
    save_state(aether_core.export_state(), name="latest")
    return {"status": "updated", "config": asdict(aether_core.config)}
# NOTE(review): route not listed in the module docstring; GET chosen to match
# the function name, but this endpoint has a save side effect — confirm the
# intended path/method.
@app.get("/snapshot")
async def get_snapshot():
    """Force an immediate state save and report the local snapshot path."""
    if aether_core is None:
        return {}
    save_state(aether_core.export_state(), name="latest")
    return {"status": "saved", "snapshot_path": str(STATE_DIR / "latest.json")}
def get_live_status():
    """Render the dashboard's live status panel as plain text.

    Fixes: the empty-fitness branch previously printed a bare "N/A" without
    its "Best Fitness:" label, and ``knowledge.stats()`` was computed twice.
    """
    if aether_core is None:
        return "Initializing..."
    ref = aether_core.self_reflect()
    kg_stats = aether_core.knowledge.stats()  # hoisted: used for two lines below
    fitness_line = (f"Best Fitness: {aether_core.fitness_log[-1]:.4f}"
                    if aether_core.fitness_log else "Best Fitness: N/A")
    lines = [
        f"Generation: {aether_core.generation}",
        fitness_line,
        f"Arch Changes: {len(aether_core.architecture_history)}",
        f"KG Nodes: {kg_stats['num_nodes']}",
        f"KG Edges: {kg_stats['num_edges']}",
        f"Symbolic Gate: {ref['neuro_symbolic_balance']['symbolic']:.3f}",
        f"Neural Gate: {ref['neuro_symbolic_balance']['neural']:.3f}",
        "---",
        "Recommendations:",
    ]
    lines.extend(ref["recommendations"] or ["No recommendations at this time"])
    return "\n".join(lines)
def get_history_text():
    """Format the accepted-architecture log as a fixed-width text table."""
    if aether_core is None:
        return "No history"
    rows = ["Generation | Hash | Fitness | Agents | Macro-Dim | Mut-Rate"]
    for entry in aether_core.architecture_history:
        cfg = entry["config"]
        rows.append(
            f" {entry['generation']:02d} | {entry['hash']} | {entry['fitness']:.4f} | "
            f"{cfg['num_agents']} | {cfg['macro_policy_dim']} | {cfg['mutation_rate']:.3f}"
        )
    return "\n".join(rows)
def execute_gradio_task(task_text):
    """Run a task through the core and summarize the result for the UI."""
    if aether_core is None:
        return "AETHER not ready"
    outcome = aether_core.forward(task_text)
    summary = (
        f"Task: {task_text}",
        f"Symbolic Weight: {outcome['symbolic_weight']:.3f}",
        f"Neural Weight: {outcome['neural_weight']:.3f}",
        f"Output: {outcome['output']['output'][:200]}...",
        f"Agents: {list(outcome['output']['agent_outputs'].keys())}",
    )
    return "\n".join(summary)
# Gradio monitoring dashboard, mounted over the FastAPI app at "/".
# NOTE(review): the label strings below contain mojibake (e.g. "π§", "β") —
# presumably originally emoji/dashes mangled by encoding; left byte-identical
# here since they are runtime strings.
with gr.Blocks(title="AETHER Monitor") as demo:
    gr.Markdown("## π§ AETHER v0.3.0 β Autonomous Self-Evolving Architecture")
    gr.Markdown("Runs 24/7. Evolves in background. State auto-persisted to Hub. REST API accessible at `/status`, `/task`, `/evolve`, etc.")
    with gr.Row():
        with gr.Column():
            # Polls get_live_status every 5 seconds via Gradio's `every=`.
            status_box = gr.Textbox(label="Live System Status", value=get_live_status, lines=12, every=5)
            refresh_btn = gr.Button("π Refresh")
        with gr.Column():
            history_box = gr.Textbox(label="Evolution History", value=get_history_text, lines=12, every=10)
    with gr.Row():
        with gr.Column():
            task_input = gr.Textbox(label="Submit a Task", placeholder="e.g., Intelligence requires...")
            task_btn = gr.Button("β‘ Execute")
            task_output = gr.Textbox(label="Task Result", lines=6)
        with gr.Column():
            gr.Markdown("### Quick Actions")
            evolve_btn = gr.Button("π§¬ Trigger 1 Evolution Cycle")
            evolve_out = gr.Textbox(label="Evolution Result", lines=4)
            snapshot_btn = gr.Button("πΎ Force Save Snapshot")
            snapshot_out = gr.Textbox(label="Save Status", lines=2)
    refresh_btn.click(get_live_status, outputs=status_box)
    task_btn.click(execute_gradio_task, inputs=task_input, outputs=task_output)
    # NOTE(review): these two handlers only return status text — they do not
    # actually call evolve()/save_state(); presumably the REST endpoints are
    # the real triggers. Confirm whether the buttons should invoke them.
    evolve_btn.click(lambda: "Evolution triggered via API" if aether_core else "Not ready", outputs=evolve_out)
    snapshot_btn.click(lambda: "Snapshot saved" if aether_core else "Not ready", outputs=snapshot_out)
# Mount the dashboard onto the FastAPI app; REST routes remain available.
app = gr.mount_gradio_app(app, demo, path="/")
if __name__ == "__main__":
    # Hugging Face Spaces expects the server to listen on port 7860.
    uvicorn.run(app, host="0.0.0.0", port=7860)