# Provenance (upload-page metadata, preserved as comments so the module parses):
#   immutable_reality_engine / EIS_ESL_PNC_CEC_INFMOD.txt
#   uploaded by: upgraedd — "Upload 2 files" — revision b04791f (verified)
#!/usr/bin/env python3
"""
Intent Inference Module (INFMOD) for EIS/ESL/PNC/CEC v6
========================================================
This module performs *hypothesis-level* intent inference
based on structural signals from the ESLedger.
It does NOT:
- assert intent
- assert agency
- assert truth
It DOES:
- generate competing hypotheses
- attach explicit evidence
- propagate uncertainty
- maintain epistemic separation
"""
from dataclasses import dataclass
from typing import List, Dict, Optional
from datetime import datetime
# ------------------------------------------------------------
# INTENT HYPOTHESIS DATA MODEL
# ------------------------------------------------------------
@dataclass
class IntentHypothesis:
    """One competing hypothesis about intent behind observed structure.

    Carries an explicit probability weight, evidence list, and uncertainty
    factors so it reads as a hypothesis — never a verdict (see module
    docstring: no intent, agency, or truth is asserted).
    """

    # Hypothesized agent class, e.g. "institutional", "network",
    # "emergent", "unknown".
    agent: str
    # Hypothesized incentive, e.g. "reputation_protection",
    # "narrative_control".
    incentive: str
    # Minimal DAG of event relationships (structural signals -> hypothesis).
    causal_graph: Dict
    # Weight in [0, 1] — an evidence-derived weight, not a verdict.
    probability: float
    # Explicit epistemic-humility notes bounding the hypothesis.
    uncertainty_factors: List[str]
    # Concrete observations supporting the hypothesis.
    evidence: List[str]
# ------------------------------------------------------------
# INTENT INFERENCE ENGINE
# ------------------------------------------------------------
class IntentInferenceEngine:
    """Generates competing, evidence-backed intent hypotheses.

    Works purely from structural signals exposed by the ledger layer; it
    never asserts intent, agency, or truth — only weighted hypotheses with
    explicit evidence and uncertainty factors.
    """

    def __init__(self, structural_layer):
        """
        Args:
            structural_layer: an ESLedger instance or compatible interface.
                Must expose ``claims`` (mapping of claim-id -> claim dict
                with at least a ``"text"`` key) and
                ``get_entity_suppression(target) -> float``.
        """
        self.structural = structural_layer

    # --------------------------------------------------------
    # MAIN ENTRY POINT
    # --------------------------------------------------------
    def infer(self, target: str) -> List[IntentHypothesis]:
        """Generate competing intent hypotheses for a given entity or term.

        Args:
            target: entity name or term to scan claims for
                (matched case-insensitively against claim text).

        Returns:
            A list of IntentHypothesis objects — always at least the
            "emergent" and "unknown" fallback hypotheses.
        """
        metrics = self._gather_structural_metrics(target)
        incentive_models = self._build_incentive_models(metrics)
        return self._generate_hypotheses(metrics, incentive_models)

    # --------------------------------------------------------
    # SHARED CLAIM-MATCHING HELPER
    # --------------------------------------------------------
    def _matching_claims(self, target: str):
        """Yield every claim whose text mentions *target* (case-insensitive).

        Hoists the lowercasing of *target* out of the scan loop; previously
        each metric method repeated this matching logic inline.
        """
        needle = target.lower()
        for claim in self.structural.claims.values():
            if needle in claim["text"].lower():
                yield claim

    # --------------------------------------------------------
    # STEP 1 — STRUCTURAL METRIC EXTRACTION
    # --------------------------------------------------------
    def _gather_structural_metrics(self, target: str) -> Dict:
        """
        Pulls structural signals from the ESLedger.
        These are *signals*, not interpretations.
        """
        return {
            "suppression_score": self.structural.get_entity_suppression(target),
            "coordination_likelihood": self._avg_claim_field(target, "coordination_likelihood"),
            "negation_density": self._negation_density(target),
            "temporal_pattern": self._temporal_pattern(target),
            "entity_presence": self._entity_presence(target),
        }

    def _avg_claim_field(self, target: str, field: str) -> float:
        """Mean value of *field* over claims mentioning *target* (0.0 if none).

        Claims lacking the field contribute 0.0 to the mean.
        """
        vals = [claim.get(field, 0.0) for claim in self._matching_claims(target)]
        return sum(vals) / len(vals) if vals else 0.0

    def _negation_density(self, target: str) -> float:
        """Fraction of matching claims containing a negation cue (0.0 if none)."""
        neg = 0
        total = 0
        for claim in self._matching_claims(target):
            total += 1
            # NOTE: "no " keeps its trailing space to avoid matching "nobody" etc.
            if any(n in claim["text"].lower() for n in ["not", "never", "no "]):
                neg += 1
        return neg / total if total else 0.0

    def _temporal_pattern(self, target: str) -> Dict:
        """Count and earliest timestamp of matching claims.

        Returns:
            {"count": int, "first": datetime | None}. Claims with a missing
            or unparseable ISO-8601 timestamp are skipped (best-effort).
        """
        timestamps = []
        for claim in self._matching_claims(target):
            # Narrowed from a bare ``except:`` — only tolerate absent or
            # malformed timestamps, not arbitrary failures.
            try:
                raw = claim["timestamp"].replace("Z", "+00:00")
                timestamps.append(datetime.fromisoformat(raw))
            except (KeyError, ValueError, TypeError, AttributeError):
                continue
        timestamps.sort()
        return {"count": len(timestamps), "first": timestamps[0] if timestamps else None}

    def _entity_presence(self, target: str) -> int:
        """Number of claims mentioning *target*."""
        return sum(1 for _ in self._matching_claims(target))

    # --------------------------------------------------------
    # STEP 2 — INCENTIVE MODELING
    # --------------------------------------------------------
    def _build_incentive_models(self, metrics: Dict) -> List[Dict]:
        """
        Creates abstract incentive models based on structural signals.
        These are NOT intent claims — they are interpretive scaffolds.
        The "emergent" and "unknown" models are always emitted so the
        hypothesis set is never empty.
        """
        models = []
        # Institutional incentive model — triggered by suppression or coordination.
        if metrics["suppression_score"] > 0.4 or metrics["coordination_likelihood"] > 0.5:
            models.append({
                "agent": "institutional",
                "incentive": "narrative_control",
                "weight": 0.4 + metrics["coordination_likelihood"] * 0.3,
                "uncertainties": ["no direct evidence of agency"],
            })
        # Network incentive model — lower coordination threshold than institutional.
        if metrics["coordination_likelihood"] > 0.3:
            models.append({
                "agent": "network",
                "incentive": "signal_amplification",
                "weight": 0.3 + metrics["coordination_likelihood"] * 0.2,
                "uncertainties": ["coordination may be emergent"],
            })
        # Emergent systemic model — unconditional fallback.
        models.append({
            "agent": "emergent",
            "incentive": "incentive_alignment",
            "weight": 0.2 + metrics["negation_density"] * 0.2,
            "uncertainties": ["emergent patterns mimic intent"],
        })
        # Unknown agent model — unconditional epistemic-humility floor.
        models.append({
            "agent": "unknown",
            "incentive": "unclear",
            "weight": 0.1,
            "uncertainties": ["insufficient structural signal"],
        })
        return models

    # --------------------------------------------------------
    # STEP 3 — HYPOTHESIS GENERATION
    # --------------------------------------------------------
    def _generate_hypotheses(self, metrics: Dict, models: List[Dict]) -> List[IntentHypothesis]:
        """Wrap each incentive model in an IntentHypothesis with evidence attached."""
        hypotheses = []
        for model in models:
            evidence = self._collect_evidence(metrics, model)
            hypotheses.append(
                IntentHypothesis(
                    agent=model["agent"],
                    incentive=model["incentive"],
                    causal_graph=self._build_causal_graph(metrics),
                    # Clamp: additive weights can exceed 1.0 in principle.
                    probability=min(1.0, model["weight"]),
                    uncertainty_factors=model["uncertainties"],
                    evidence=evidence,
                )
            )
        return hypotheses

    def _collect_evidence(self, metrics: Dict, model: Dict) -> List[str]:
        """Human-readable evidence strings for metrics exceeding their thresholds.

        Note: evidence is metric-driven and identical for every model; the
        *model* argument is kept for interface stability.
        """
        evidence = []
        if metrics["suppression_score"] > 0.4:
            evidence.append(f"High suppression_score: {metrics['suppression_score']:.2f}")
        if metrics["coordination_likelihood"] > 0.3:
            evidence.append(f"Elevated coordination_likelihood: {metrics['coordination_likelihood']:.2f}")
        if metrics["negation_density"] > 0.2:
            evidence.append(f"Negation density suggests contested narrative: {metrics['negation_density']:.2f}")
        if metrics["entity_presence"] > 10:
            evidence.append(f"High entity presence: {metrics['entity_presence']} mentions")
        return evidence or ["No strong evidence — hypothesis weak"]

    def _build_causal_graph(self, metrics: Dict) -> Dict:
        """
        Minimal DAG: structural signals → incentive model
        """
        return {
            "suppression_score": metrics["suppression_score"],
            "coordination_likelihood": metrics["coordination_likelihood"],
            "negation_density": metrics["negation_density"],
            "leads_to": "incentive_hypothesis",
        }