diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..134ecd663699948a60e93efc0c4e62833bd6f12b --- /dev/null +++ b/.dockerignore @@ -0,0 +1,9 @@ +.git +.pytest_cache +__pycache__ +*.pyc +*.pyo +*.pyd +.Python +artifacts +tests diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b --- /dev/null +++ b/.gitattributes @@ -0,0 +1,35 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text 
+*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml new file mode 100644 index 0000000000000000000000000000000000000000..9a4c696c6ec778e18ec89ddc43506bf8ab350d54 --- /dev/null +++ b/.github/workflows/validation.yml @@ -0,0 +1,32 @@ +name: Validation + +on: + push: + pull_request: + workflow_dispatch: + +jobs: + validate: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install project + run: | + python -m pip install --upgrade pip + python -m pip install -e .[dev] + + - name: Run test suite + run: python -m pytest -q + + - name: Run validation gate + run: python scripts/validate_release.py + + - name: Build Docker image + run: docker build -t osint-openenv-validation . diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..43cbbde99bac58abdead4dd092fa060443272035 --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +*.pyc +blueprint.txt +*.egg-info +artifacts/* +*.html +osint_dashboard.html +.venv/ +.tmp_compare/ +metaQA/ +.codex +TODO.txt diff --git a/.tmp_compare/Meta-s-LedgerShield b/.tmp_compare/Meta-s-LedgerShield new file mode 160000 index 0000000000000000000000000000000000000000..fd5c9b60ddfbd2eba9d09001938b63169ac98f7b --- /dev/null +++ b/.tmp_compare/Meta-s-LedgerShield @@ -0,0 +1 @@ +Subproject commit fd5c9b60ddfbd2eba9d09001938b63169ac98f7b diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..22b1e97852a90f6f86d1d9e8cd27e0fc0f8338e0 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,28 @@ +FROM python:3.12-slim + +RUN useradd -m -u 1000 user + +USER user +ENV HOME=/home/user \ + PATH=/home/user/.local/bin:$PATH \ + PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 \ + PORT=7860 + +WORKDIR $HOME/app + +COPY --chown=user pyproject.toml README.md openenv.yaml 
inference.py $HOME/app/ +COPY --chown=user src $HOME/app/src +COPY --chown=user config $HOME/app/config +COPY --chown=user datasets $HOME/app/datasets +COPY --chown=user docs $HOME/app/docs +COPY --chown=user scripts $HOME/app/scripts +COPY --chown=user server.py $HOME/app/server.py + +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -e ".[train]" && \ + chmod +x $HOME/app/scripts/space_start.sh + +EXPOSE 7860 + +CMD ["sh", "/home/user/app/scripts/space_start.sh"] diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bf5cd40da149b40de12070d741d7d361013ea236 --- /dev/null +++ b/README.md @@ -0,0 +1,392 @@ +--- +title: OSINT OpenEnv +emoji: 🕵️ +colorFrom: blue +colorTo: yellow +sdk: docker +app_port: 7860 +pinned: false +license: apache-2.0 +tags: + - openenv + - osint + - benchmark + - docker + - fastapi +short_description: Docker OSINT benchmark with fixed OpenEnv tasks. +--- + +# OSINT OpenEnv + +OSINT OpenEnv is a synthetic benchmark environment for tool-using agents that must recover identities, trace events, and link entities across noisy multi-platform records. The project is designed to feel like a compact OSINT workflow rather than a raw QA dataset: agents query mock profiles, posts, forum threads, and semantic memory, build a working graph, and then submit an answer. + +The motivation is to provide a reproducible OpenEnv-compatible environment for evaluating graph-building and tool-using reasoning without depending on live web data, unstable APIs, or private corpora. That makes it useful for local development, regression testing, and hosted demos such as a Docker-based Hugging Face Space. + +## Environment Summary + +The environment generates or loads a hidden canonical graph of users, aliases, organizations, locations, posts, threads, and events. It then exposes partial platform views and a task list drawn from that graph. 
+ +The default hosted Space uses the fixed-level benchmark in `datasets/fixed_levels/seed_fixed_levels.json`, which now contains 30 stable tasks over a larger shared seeded graph. + +The repository now supports two dataset backends: + +- `canonical` (existing fixed-level synthetic graph pipeline) +- `metaqa` (MetaQA KB + QA files for `1-hop`, `2-hop`, and `3-hop`) + +Use `config/shared_config.json` or CLI flags (`--dataset-mode`, `--metaqa-root`, `--metaqa-hops`, `--metaqa-splits`) to choose which backend to run. + +## Action Space + +The environment exposes three actions: + +- `CALL_TOOL`: query platform views or semantic memory such as `search_posts`, `get_profile`, `search_threads`, `get_connections`, or `search_memory`. +- `ADD_EDGE`: add a candidate relation to the working memory graph. +- `ANSWER`: submit the final answer as an exact node id string. + +## Observation Space + +Each step returns a JSON observation with four parts: + +- `tool_outputs`: the most recent tool results. +- `graph_snapshot`: the current working-memory graph edges. +- `action_history`: recent actions and rewards. +- `task`: the active task id, task type, and question. + +## Task Types And Difficulty + +The benchmark mixes direct lookups with multi-hop traces: + +- Easy: single-hop identity resolution, organization lookup, event lookup, or location lookup. +- Mid: two-hop alias-to-user-to-organization or thread-to-event-to-user traces. +- High: cross-platform multi-hop traces combining aliases, authored content, event references, organization links, and direct connections. 
+ +In MetaQA mode, hop buckets are mapped into the same reward difficulty tiers: + +- `1-hop` -> `easy` +- `2-hop` -> `medium` +- `3-hop` -> `hard` + +Common task families include: + +- `identity_resolution` +- `network_discovery` +- `event_tracing` +- `cross_platform_linking` +- `deanonymization` +- `convoluted_trace` + +Expected difficulty increases with the number of relations the agent must chain together and whether the evidence is split across posts, threads, aliases, and profile edges. + +## Repository Layout + +```text +src/osint_env/ + agents/ single-agent and swarm runners + baselines/ reusable OpenAI baseline runner + config/ shared config and seed loading + data/ graph/view/task generation + domain/ dataclasses and environment models + env/ environment, reward logic, OpenEnv compatibility shim + eval/ evaluation metrics and leaderboard helpers + llm/ mock, Ollama, and OpenAI client wrappers + memory/ working graph and semantic memory + platforms/ tool APIs over synthetic platform views + viz/ dashboard export + +scripts/ + build_fixed_levels_dataset.py + run_openai_baseline.py + +datasets/fixed_levels/ + seed_fixed_levels.json + shared_config_fixed_levels.json + qwen_swarm_benchmark_fixed_levels.json + +server.py FastAPI app for local use and Docker/HF Spaces +Dockerfile Container entrypoint for Hugging Face Docker Spaces +``` + +## Setup + +Python 3.10+ is required. + +Local install: + +```bash +python -m pip install -e . 
+``` + +Install optional adversarial self-play training stack (TRL + Transformers): + +```bash +python -m pip install -e ".[train]" +``` + +Run tests: + +```bash +python -m pytest -q +``` + +Run the automated release gate: + +```bash +python scripts/validate_release.py +``` + +## Usage + +Run one demo episode: + +```bash +osint-env demo --agent-mode swarm --llm-provider mock +``` + +Run against MetaQA using the provided sample config: + +```bash +osint-env demo --config config/shared_config_metaqa.json --dataset-mode metaqa --llm-provider mock +``` + +Run MetaQA with only selected hop buckets: + +```bash +osint-env eval --config config/shared_config_metaqa.json --dataset-mode metaqa --metaqa-hops 1-hop,2-hop --episodes 5 --llm-provider mock +``` + +Run a quick evaluation: + +```bash +osint-env eval --episodes 5 --agent-mode swarm --llm-provider mock +``` + +Export a dashboard: + +```bash +osint-env benchmark --episodes 5 --agent-mode swarm --llm-provider mock --name quick_check +``` + +Run Kimi-style adversarial self-play scaffold (dry-run by default in the example config): + +```bash +osint-env train-self-play --config config/shared_config.json --train-config config/self_play_training_example.json --dry-run +``` + +When you have compute and the train dependencies installed, remove `--dry-run` (or set `"dry_run": false` in the training config) to execute TRL GRPO updates for alternating generator and answerer phases. + +The training config also supports `"model_topology": "dual"|"shared"`, `"phase_schedule": "generator_answerer"|"answerer_generator_answerer"`, `"tuning_mode": "full"|"lora"`, and `"canonical_graph_mode": "generate"|"fixed"` so you can switch between two-model vs single-model self-play, full fine-tuning vs LoRA adapters, and whether canonical graph structure is generated each round or kept fixed while training question/answer behavior. 
+ +### Hugging Face Space Smoke Run (Qwen 3.5 0.8B + W&B) + +For a short verification run (enough to confirm W&B logging before scaling up), use: + +```bash +osint-env train-self-play --config config/shared_config.json --train-config config/self_play_training_hf_a10g_smoke.json +``` + +This config: + +- uses `Qwen/Qwen3.5-0.8B` +- enables W&B reporting (`wandb_enabled: true`) +- uses `pipeline_mode: "swarm_v2"` with `canonical_graph_mode: "fixed"` to keep canonical graph candidates stable while training question/answer behavior +- keeps training intentionally short (`rounds=1`, `max_steps=5` per phase) +- uses LoRA with small batch settings so it can run as a smoke test on an A10G + +To enable canonical graph generation during swarm_v2 training, switch `"canonical_graph_mode"` to `"generate"` in the training config. + +Space setup checklist: + +1. In Space **Settings -> Hardware**, select **NVIDIA A10G (large)**. +2. In Space **Settings -> Variables and secrets**, set `WANDB_API_KEY`. +3. Set `HF_TOKEN` in Space secrets to avoid unauthenticated Hub downloads and stricter rate limits. +4. Optionally set `WANDB_ENTITY` if your project belongs to a team. +5. Set `RUN_SELF_PLAY_TRAINING=1` in Space variables to trigger training during container startup. +6. Optional overrides: + - `TRAIN_SELF_PLAY_CONFIG_PATH` (default: `config/self_play_training_hf_a10g_smoke.json`) + - `TRAIN_ENV_CONFIG_PATH` (default: `config/shared_config.json`) + - `RUN_SELF_PLAY_DRY_RUN=1` to test startup wiring without GRPO updates. + - `OSINT_TRAIN_STRICT_ASSERTS=1` to fail fast when reward variance, KL, loss, grad norms, or parameter updates stay zero. +7. Restart the Space and monitor build/runtime logs for the training run. + +W&B run naming is controlled by `wandb_run_name_prefix` and will emit phase-specific runs like `...-r001-generator` and `...-r001-answerer`. 
+ +### Reward Functions In Self-Play (Generator + Answerer) + +Self-play trains two policies with role-specific reward functions defined in `src/osint_env/training/rewards.py`. + +Generator reward (`GeneratorRewardFunction`) and answerer reward (`AnswererRewardFunction`) are both returned to GRPO as scalar scores per completion, and both are clipped to a stable range before optimization. + +#### Generator Reward (Task-Proposing Agent) + +The generator is rewarded for producing valid, grounded, diverse, and hard tasks. + +In `legacy` pipeline mode, the reward is a weighted sum: + +- `validity`: checks non-empty `question`, non-empty `answer`, and bounded `supporting_edges`. +- `hardness`: uses a frozen answerer judge; reward is higher when the judge gets the generated question wrong. +- `diversity`: penalizes near-duplicate questions via token-level Jaccard similarity against prior generated questions. +- `consistency`: checks that support edges exist in the canonical graph and that the answer/question are graph-grounded. + +Default weights (configurable through `generator_reward_weights` in training config): + +- `validity`: `0.35` +- `hardness`: `0.45` +- `diversity`: `0.10` +- `consistency`: `0.10` + +In `swarm_v2` pipeline mode, generation uses strict replay/validation first, then a structured reward: + +- Hard-gated validation via `SwarmV2ReplayValidator` (invalid samples get a fixed negative reward path). +- Reward components include validity, derivability/replayability, hardness, swarm diversity, shared-context pressure targeting, and PARL-inspired orchestration bonuses (`parallel` + `finish`). +- Invalid or non-replayable candidates are penalized before the weighted positive terms are applied. + +#### Answerer Reward (Question-Solving Agent) + +The answerer reward wraps environment-native grading so train-time behavior matches benchmark-time incentives. 
+ +For each completion, `AnswererRewardFunction`: + +- extracts the predicted answer from completion text/JSON, +- reconstructs a transient `TaskInstance` from row fields (`question`, `answer`, `supporting_edges_json`, `difficulty`), +- optionally extracts predicted supporting edges from JSON or text, +- calls `compute_answer_reward(...)` from `src/osint_env/env/reward.py`. + +`compute_answer_reward` combines exact answer quality with graph-utility shaping: + +- output format validity and exact correctness, +- knowledge-carrier and knowledge-indexing utility, +- connectivity and supporting-edge F1 against task support edges, +- efficiency and compactness penalties, +- relation/entity informativeness and repetition control (difficulty-dependent). + +Difficulty controls (`easy`, `medium`, `hard`) are preserved during training exactly as in the environment scorer, so the answerer sees the same tiered reward profile used in evaluation. + +In `swarm_v2`, the answerer reward also adds PARL-style orchestration credit (spawn/finish behavior) on top of base answer reward when orchestrator telemetry is present in the completion payload. + +Detailed design notes are in `docs/adversarial_self_play.md`. + +## OpenAI Baseline + +The reproducible OpenAI baseline is implemented in `scripts/run_openai_baseline.py`. It runs on the fixed-level benchmark, uses a stable seeded graph/task set, writes a JSON artifact, appends a leaderboard record, and exports a dashboard. + +Default behavior: + +- dataset: fixed-level benchmark +- episodes: 30 +- max steps per episode: 8 +- temperature: 0.0 +- output artifact: `artifacts/baselines/openai_fixed_levels_latest.json` + +Run it with an API key: + +```bash +export OPENAI_API_KEY="your_key_here" +python scripts/run_openai_baseline.py --model gpt-5-nano +``` + +The script is designed to stay bounded enough for a normal benchmark pass to finish comfortably under 20 minutes on a lightweight chat model, while still using the full fixed task set. 
For repeatability it fixes the benchmark graph/tasks and uses deterministic decoding settings. Because remote model backends can still change over time, the output artifact also records model metadata and system fingerprints when available. + +## Inference Script + +The submission-ready inference entrypoint is the root `inference.py` file. It talks to the deployed Hugging Face Space over HTTP, uses the OpenAI client for all model calls, and emits structured stdout logs in the `[START]`, `[STEP]`, and `[END]` format. + +The script accepts `HF_TOKEN` as the primary auth variable and also supports `OPENAI_API_KEY` or `API_KEY` as local fallbacks. +After a successful run, `inference.py` also posts the evaluation summary back to the Space so the latest `/dashboard` view reflects that run. + +Required environment variables: + +- `API_BASE_URL` +- `MODEL_NAME` +- `HF_TOKEN` + +Optional environment variables: + +- `SPACE_URL` default: `https://siddeshwar1625-osint.hf.space` +- `TASK_INDICES` default: `0,10,20` +- `MAX_STEPS` default: `8` + +Example local test command against a running local server: + +```bash +API_BASE_URL=https://api.openai.com/v1 MODEL_NAME=gpt-5.4-mini OPENAI_API_KEY=your_key SPACE_URL=http://127.0.0.1:7860 python inference.py +``` + +Example test command against the deployed Space: + +```bash +API_BASE_URL=https://api.openai.com/v1 MODEL_NAME=gpt-5.4-mini OPENAI_API_KEY=your_key SPACE_URL=https://siddeshwar1625-osint.hf.space python inference.py +``` + +## Docker And Hugging Face Space + +The repository is ready for a Docker-based Hugging Face Space: + +- `README.md` includes `sdk: docker` +- `README.md` includes the `openenv` Space tag +- `Dockerfile` serves `server.py` on port `7860` + +Local Docker smoke test: + +```bash +docker build -t osint-openenv . +docker run --rm -p 7860:7860 osint-openenv +``` + +Then open `http://localhost:7860`. 
+ +The FastAPI app serves: + +- `/`: overview page +- `/dashboard`: generated benchmark dashboard +- `/api/environment`: environment metadata +- `/health`: health check (validator-friendly alias) +- `/healthz`: health check (legacy alias) +- `/openenv.yaml`: OpenEnv HTTP spec stub +- `/openenv/tasks`: task enumeration +- `/reset` and `/openenv/reset`: episode reset endpoints +- `/step` and `/openenv/step`: episode step endpoints +- `/state` and `/openenv/state/{session_id}`: session state endpoints (`/state` returns the latest session) + +## Automated Validation + +The repository includes a pass/fail validation gate for the core delivery requirements: + +- Hugging Face Space readiness +- OpenEnv spec compliance +- reproducible baseline behavior +- at least 3 fixed tasks with working graders +- Docker image build in CI + +Local gate: + +```bash +python scripts/validate_release.py +``` + +CI gate: + +- `.github/workflows/validation.yml` +- runs `pytest` +- runs the validation script +- runs `docker build` + +## Baseline Scores + +The fixed-level benchmark was expanded from the earlier 15-question set to a 30-question set with a larger seeded graph, so older benchmark artifacts should be treated as legacy and regenerated on the new dataset before using them as reference scores. + +After you supply an OpenAI API key, the current baseline scores for the expanded benchmark will be written to: + +- `artifacts/baselines/openai_fixed_levels_latest.json` +- `artifacts/baselines/openai_fixed_levels_dashboard.html` + +## Notes On `pyproject.toml` + +The packaging file is structurally correct for a `src/` layout and editable installs. The main gaps were deployment/runtime related rather than build-breaking: + +- `openenv` is now version-bounded explicitly. +- `fastapi` and `uvicorn` are included because the repo now ships a real web server. 
+- pytest is pointed at the `tests/` directory, and the test suite also adds `src/` to `sys.path` so source-layout imports work reliably during local runs. + +## Development Notes + +The project keeps a lightweight local compatibility shim for `openenv` so the source tree remains importable even before dependencies are installed. In a normal install or Docker build, the real `openenv` package from PyPI is still used. diff --git a/artifacts/leaderboard.json b/artifacts/leaderboard.json new file mode 100644 index 0000000000000000000000000000000000000000..86bfca58d7583f431e001f412bd04a68e16c95ae --- /dev/null +++ b/artifacts/leaderboard.json @@ -0,0 +1,453 @@ +[ + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 1, + "swarm_enabled": true + }, + "created_at": "2026-04-01T12:03:13+00:00", + "episodes": 2, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.1, + "avg_connectivity_reward": 0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": 0.024705877237863647, + "avg_format_reward": 0.15, + "avg_graph_f1": 1.0, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.15, + "avg_relation_informativeness_reward": 0.03137141693971891, + "avg_reward": 3.534162700533434, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8618382743087459, + "retrieval_signal": 0.7275, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.6082154588355165, + "task_success_rate": 1.0, + "tool_efficiency": 0.25 + }, + "run_id": "run_0001", + "run_name": "swarm_seed_smoke" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 1, + "swarm_enabled": true + 
}, + "created_at": "2026-04-01T12:16:28+00:00", + "episodes": 2, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.1, + "avg_connectivity_reward": 0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": 0.024705877237863647, + "avg_format_reward": 0.15, + "avg_graph_f1": 1.0, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.15, + "avg_relation_informativeness_reward": 0.03137141693971891, + "avg_reward": 3.534162700533434, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8618382743087459, + "retrieval_signal": 0.7275, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.6082154588355165, + "task_success_rate": 1.0, + "tool_efficiency": 0.25 + }, + "run_id": "run_0002", + "run_name": "swarm_seed_smoke" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 0, + "swarm_enabled": true + }, + "created_at": "2026-04-01T12:25:15+00:00", + "episodes": 20, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.10000000000000002, + "avg_connectivity_reward": 0.23999999999999994, + "avg_diversity_reward": 0.08000000000000002, + "avg_entity_informativeness_reward": -0.00983642442912193, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 1.0, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.1125, + "avg_relation_informativeness_reward": 0.007185245326892638, + "avg_reward": 3.351267560586956, + "avg_soft_shaping_reward": 0.14999999999999997, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8573187614039594, + "retrieval_signal": 0.7143750000000001, + 
"spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5814697641795541, + "task_success_rate": 1.0, + "tool_efficiency": 0.25 + }, + "run_id": "run_0003", + "run_name": "baseline_swarm" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 1, + "swarm_enabled": true + }, + "created_at": "2026-04-01T17:27:30+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.1, + "avg_connectivity_reward": 0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": 0.06128386989162576, + "avg_format_reward": 0.15, + "avg_graph_f1": 1.0, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.3, + "avg_relation_informativeness_reward": 0.12, + "avg_reward": 3.916035942914144, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8718832338515622, + "retrieval_signal": 0.78, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.6332567739783251, + "task_success_rate": 1.0, + "tool_efficiency": 0.25 + }, + "run_id": "run_0004", + "run_name": "ollama_qwen_smoke" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 1, + "swarm_enabled": true + }, + "created_at": "2026-04-01T17:29:12+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.1, + "avg_connectivity_reward": 0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": 0.06128386989162576, + "avg_format_reward": 0.15, + "avg_graph_f1": 1.0, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.3, + "avg_relation_informativeness_reward": 0.12, + 
"avg_reward": 4.059369276247478, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.9020114237119466, + "retrieval_signal": 0.78, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.6332567739783251, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0005", + "run_name": "ollama_qwen_smoke2" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 0, + "swarm_enabled": true + }, + "created_at": "2026-04-01T17:39:15+00:00", + "episodes": 2, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2, + "avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0683333333333333, + "avg_entity_informativeness_reward": -0.07397348480982455, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.6666666666666667, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.14884615384615385, + "avg_relation_informativeness_reward": -0.00860389783205907, + "avg_reward": 4.351764433970379, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6973935600514568, + "retrieval_signal": 0.7270961538461539, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5137345234716233, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0006", + "run_name": "high_timeout_shared_ctx" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 0, + "swarm_enabled": true + }, + "created_at": "2026-04-01T18:57:40+00:00", + "episodes": 3, + "metrics": { + 
"avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.13333333333333333, + "avg_connectivity_reward": 0.09999999999999999, + "avg_diversity_reward": 0.056666666666666664, + "avg_entity_informativeness_reward": -0.020478979694240708, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.8148148148148149, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.27, + "avg_relation_informativeness_reward": 0.07174291752145656, + "avg_reward": 4.0269419367756605, + "avg_soft_shaping_reward": 0.19999999999999998, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.7366215569569294, + "retrieval_signal": 0.7695000000000001, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5570861208987765, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0007", + "run_name": "episode_selector_check" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 15, + "swarm_enabled": true + }, + "created_at": "2026-04-01T19:11:44+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.10000000000000002, + "avg_connectivity_reward": 0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": -0.02722031691758704, + "avg_format_reward": 0.15, + "avg_graph_f1": 1.0, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": -0.00011920119799207429, + "avg_reward": 3.444079221573606, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8828572592896698, + "retrieval_signal": 0.675, + "spawn_completion_rate": 1.0, + "spawn_signal": 
0.6666666666666666, + "structural_signal": 0.5915320963768841, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0008", + "run_name": "qwen_rerun" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 15, + "swarm_enabled": true + }, + "created_at": "2026-04-01T19:19:34+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.10000000000000002, + "avg_connectivity_reward": 0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": -0.024861029515896544, + "avg_format_reward": 0.15, + "avg_graph_f1": 1.0, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": -0.0024320085090966614, + "avg_reward": 3.4441257016641917, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8828581656226586, + "retrieval_signal": 0.675, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5915413923950014, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0009", + "run_name": "qwen_episode_fix" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 15, + "swarm_enabled": true + }, + "created_at": "2026-04-01T19:24:37+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.10000000000000002, + "avg_connectivity_reward": 0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": -0.02722031691758704, + "avg_format_reward": 0.15, + "avg_graph_f1": 1.0, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 
-0.0030604289114462002, + "avg_reward": 3.4411379938601514, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8827999009847504, + "retrieval_signal": 0.675, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5909438508341933, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0010", + "run_name": "qwen_rerun_graph_fix" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 15, + "swarm_enabled": true + }, + "created_at": "2026-04-01T19:31:54+00:00", + "episodes": 15, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.16666666666666666, + "avg_connectivity_reward": 0.16999999999999998, + "avg_diversity_reward": 0.1157777777777778, + "avg_entity_informativeness_reward": -0.0181244777358718, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.8492063492063492, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.012000000000000002, + "avg_relation_informativeness_reward": 0.05935837081627929, + "avg_reward": 4.201760569277529, + "avg_soft_shaping_reward": 0.24999999999999994, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8534887252258901, + "retrieval_signal": 0.6792, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5847801119494148, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0011", + "run_name": "qwen_rerun_graph_fix" + } +] \ No newline at end of file diff --git a/config/seed_example.json b/config/seed_example.json new file mode 100644 index 
0000000000000000000000000000000000000000..92b4aec40eac09d3a4dae1e70e34d9be4fe1bfd1 --- /dev/null +++ b/config/seed_example.json @@ -0,0 +1,52 @@ +{ + "seeding": { + "seeded_nodes": [ + { + "node_id": "alias_seed_001", + "node_type": "alias", + "attrs": { + "handle": "@shadow_seed" + } + }, + { + "node_id": "user_seed_001", + "node_type": "user", + "attrs": { + "name": "Seed User", + "org": "Helios Labs", + "location": "Pune" + } + } + ], + "_note": "Use with --seed-file. LLM provider and API keys are configured in config/shared_config.json or CLI flags.", + "seeded_edges": [ + { + "src": "alias_seed_001", + "rel": "alias_of", + "dst": "user_seed_001", + "confidence": 1.0 + } + ], + "seeded_questions": [ + { + "task_type": "identity_resolution", + "question": "Which canonical user owns alias alias_seed_001?", + "answer": "user_seed_001", + "supporting_edges": [ + { + "src": "alias_seed_001", + "rel": "alias_of", + "dst": "user_seed_001" + } + ], + "metadata": { + "source": "manual_seed" + } + } + ], + "llm_generate_remaining_graph": true, + "llm_generate_remaining_tasks": true, + "llm_generated_edge_budget": 6, + "llm_generated_task_budget": 8 + } +} diff --git a/config/seed_ollama_smoke.json b/config/seed_ollama_smoke.json new file mode 100644 index 0000000000000000000000000000000000000000..a26bd097e7f482dce893ef83df92c53712c28fbb --- /dev/null +++ b/config/seed_ollama_smoke.json @@ -0,0 +1,51 @@ +{ + "seeding": { + "seeded_nodes": [ + { + "node_id": "alias_smoke_001", + "node_type": "alias", + "attrs": { + "handle": "@smoke_alias" + } + }, + { + "node_id": "user_smoke_001", + "node_type": "user", + "attrs": { + "name": "Smoke User", + "org": "Apex Dynamics", + "location": "Bengaluru" + } + } + ], + "seeded_edges": [ + { + "src": "alias_smoke_001", + "rel": "alias_of", + "dst": "user_smoke_001", + "confidence": 1.0 + } + ], + "seeded_questions": [ + { + "task_type": "identity_resolution", + "question": "Which canonical user owns alias alias_smoke_001?", + "answer": 
"user_smoke_001", + "supporting_edges": [ + { + "src": "alias_smoke_001", + "rel": "alias_of", + "dst": "user_smoke_001" + } + ], + "metadata": { + "source": "ollama_smoke" + } + } + ], + "llm_generate_remaining_graph": false, + "llm_generate_remaining_tasks": false, + "llm_generated_edge_budget": 0, + "llm_generated_task_budget": 0 + } +} diff --git a/config/self_play_training_example.json b/config/self_play_training_example.json new file mode 100644 index 0000000000000000000000000000000000000000..bb7c1ba689068eec2c29553c66d433bfadba391b --- /dev/null +++ b/config/self_play_training_example.json @@ -0,0 +1,105 @@ +{ + "rounds": 3, + "output_dir": "artifacts/self_play", + "dry_run": true, + "canonical_graph_mode": "generate", + "pipeline_mode": "swarm_v2", + "model_topology": "dual", + "phase_schedule": "generator_answerer", + "tuning_mode": "full", + "shared_model_name_or_path": "", + "seed_tasks_per_round": 16, + "generated_tasks_per_round": 24, + "generator_prompts_per_round": 24, + "max_graph_context_nodes": 100, + "max_graph_context_edges": 100, + "max_support_edges": 8, + "answerer_judge_max_new_tokens": 48, + "generator_reward_weights": { + "validity": 0.35, + "hardness": 0.45, + "diversity": 0.1, + "consistency": 0.1 + }, + "lora": { + "r": 16, + "alpha": 32, + "dropout": 0.05, + "target_modules": ["q_proj", "k_proj", "v_proj", "o_proj"], + "bias": "none", + "task_type": "CAUSAL_LM" + }, + "swarm_v2": { + "generator_swarm": { + "shared_context": true, + "max_agents": 4, + "max_breadth": 3, + "max_depth": 2, + "planner_rounds": 2, + "tools_per_agent": 2 + }, + "answerer_swarm": { + "shared_context": true, + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "planner_rounds": 2, + "tools_per_agent": 2 + }, + "validation": { + "max_support_edges": 8, + "max_path_hops": 4, + "max_context_nodes": 14, + "max_context_edges": 8, + "duplicate_similarity_threshold": 0.8 + }, + "shared_context": { + "shared_by_default": true, + "max_nodes": 14, + "max_edges": 8, 
+ "target_pressure": 0.85 + } + }, + "generator_phase": { + "model_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct", + "learning_rate": 1e-06, + "max_steps": 64, + "per_device_train_batch_size": 2, + "gradient_accumulation_steps": 4, + "num_generations": 4, + "max_completion_length": 256, + "temperature": 1.0, + "top_p": 1.0, + "beta": 0.01, + "epsilon": 0.2, + "num_iterations": 1, + "loss_type": "dapo", + "scale_rewards": "none", + "logging_steps": 10, + "save_steps": 50, + "output_subdir": "generator", + "use_vllm": false, + "vllm_mode": "colocate" + }, + "answerer_phase": { + "model_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct", + "learning_rate": 1e-06, + "max_steps": 64, + "per_device_train_batch_size": 2, + "gradient_accumulation_steps": 4, + "num_generations": 4, + "max_completion_length": 192, + "temperature": 1.0, + "top_p": 1.0, + "beta": 0.01, + "epsilon": 0.2, + "num_iterations": 1, + "loss_type": "dapo", + "scale_rewards": "none", + "logging_steps": 10, + "save_steps": 50, + "output_subdir": "answerer", + "use_vllm": false, + "vllm_mode": "colocate" + } +} diff --git a/config/self_play_training_hf_a10g_smoke.json b/config/self_play_training_hf_a10g_smoke.json new file mode 100644 index 0000000000000000000000000000000000000000..4a5cabb9a2710dc0968b282805778c057ed1bfcb --- /dev/null +++ b/config/self_play_training_hf_a10g_smoke.json @@ -0,0 +1,72 @@ +{ + "rounds": 2, + "output_dir": "artifacts/self_play_hf_a10g_train", + "dry_run": false, + "wandb_enabled": true, + "wandb_project": "osint-self-play-train", + "wandb_entity": "", + "wandb_run_name_prefix": "qwen35-08b-a10g-train", + "pipeline_mode": "swarm_v2", + "canonical_graph_mode": "fixed", + "model_topology": "shared", + "phase_schedule": "generator_answerer", + "tuning_mode": "lora", + "shared_model_name_or_path": "Qwen/Qwen3.5-0.8B", + "seed_tasks_per_round": 16, + "generated_tasks_per_round": 24, + "generator_prompts_per_round": 24, + "max_graph_context_nodes": 40, + "max_graph_context_edges": 40, + 
"max_support_edges": 6, + "answerer_judge_max_new_tokens": 32, + "generator_phase": { + "model_name_or_path": "Qwen/Qwen3.5-0.8B", + "learning_rate": 1e-06, + "max_steps": 50, + "per_device_train_batch_size": 2, + "gradient_accumulation_steps": 1, + "num_generations": 2, + "max_completion_length": 384, + "temperature": 0.7, + "top_p": 0.9, + "beta": 0.01, + "epsilon": 0.2, + "num_iterations": 1, + "loss_type": "dapo", + "scale_rewards": "group", + "logging_steps": 1, + "save_steps": 10, + "output_subdir": "generator_train", + "use_vllm": false, + "vllm_mode": "colocate" + }, + "answerer_phase": { + "model_name_or_path": "Qwen/Qwen3.5-0.8B", + "learning_rate": 1e-06, + "max_steps": 50, + "per_device_train_batch_size": 2, + "gradient_accumulation_steps": 1, + "num_generations": 2, + "max_completion_length": 192, + "temperature": 0.4, + "top_p": 0.9, + "beta": 0.01, + "epsilon": 0.2, + "num_iterations": 1, + "loss_type": "dapo", + "scale_rewards": "group", + "logging_steps": 1, + "save_steps": 10, + "output_subdir": "answerer_train", + "use_vllm": false, + "vllm_mode": "colocate" + }, + "lora": { + "r": 8, + "alpha": 16, + "dropout": 0.05, + "target_modules": ["q_proj", "k_proj", "v_proj", "o_proj"], + "bias": "none", + "task_type": "CAUSAL_LM" + } +} diff --git a/config/shared_config.json b/config/shared_config.json new file mode 100644 index 0000000000000000000000000000000000000000..25b6a9f0fb7ce545f7ac4b9d06a241e6d714a59f --- /dev/null +++ b/config/shared_config.json @@ -0,0 +1,63 @@ +{ + "environment": { + "n_users": 40, + "alias_density": 0.35, + "noise_level": 0.15, + "red_herring_rate": 0.1, + "max_steps": 18, + "seed": 7 + }, + "dataset": { + "mode": "canonical", + "metaqa_root": "metaQA", + "metaqa_kb_path": "", + "metaqa_variant": "vanilla", + "metaqa_hops": ["1-hop", "2-hop", "3-hop"], + "metaqa_splits": ["train", "dev", "test"] + }, + "swarm": { + "enabled": true, + "max_agents": 3, + "max_breadth": 2, + "max_width": 2, + "max_depth": 2, + 
"planner_rounds": 2, + "tools_per_agent": 1 + }, + "spawn_reward": { + "lambda_parallel": 0.15, + "lambda_finish": 0.2, + "anneal": 1.0, + "max_parallel_hint": 3 + }, + "seeding": { + "seeded_nodes": [], + "seeded_edges": [], + "seeded_questions": [], + "llm_generate_remaining_graph": true, + "llm_generate_remaining_tasks": true, + "llm_generated_edge_budget": 6, + "llm_generated_task_budget": 8, + "llm_generation_parallel": true, + "llm_generation_workers": 3, + "llm_generation_retries": 2, + "allow_template_fallback_on_llm_failure": false + }, + "llm": { + "provider": "ollama", + "model": "qwen3:2b", + "temperature": 0.1, + "max_tokens": 256, + "timeout_seconds": 240, + "ollama_base_url": "http://127.0.0.1:11434", + "openai_base_url": "https://api.openai.com/v1", + "openai_api_key_env": "OPENAI_API_KEY", + "openai_api_key": "" + }, + "runtime": { + "default_episodes": 20, + "leaderboard_path": "artifacts/leaderboard.json", + "dashboard_path": "artifacts/osint_dashboard.html", + "sweep_dashboard_dir": "artifacts/sweep_dashboards" + } +} diff --git a/config/shared_config_metaqa.json b/config/shared_config_metaqa.json new file mode 100644 index 0000000000000000000000000000000000000000..686d8e036188c61d3c14d325a19a8a618c49c84d --- /dev/null +++ b/config/shared_config_metaqa.json @@ -0,0 +1,63 @@ +{ + "environment": { + "n_users": 40, + "alias_density": 0.35, + "noise_level": 0.15, + "red_herring_rate": 0.1, + "max_steps": 18, + "seed": 7 + }, + "dataset": { + "mode": "metaqa", + "metaqa_root": "metaQA", + "metaqa_kb_path": "metaQA/kb.txt", + "metaqa_variant": "vanilla", + "metaqa_hops": ["1-hop", "2-hop", "3-hop"], + "metaqa_splits": ["train", "dev", "test"] + }, + "swarm": { + "enabled": true, + "max_agents": 3, + "max_breadth": 2, + "max_width": 2, + "max_depth": 2, + "planner_rounds": 2, + "tools_per_agent": 1 + }, + "spawn_reward": { + "lambda_parallel": 0.15, + "lambda_finish": 0.2, + "anneal": 1.0, + "max_parallel_hint": 3 + }, + "seeding": { + "seeded_nodes": 
[], + "seeded_edges": [], + "seeded_questions": [], + "llm_generate_remaining_graph": false, + "llm_generate_remaining_tasks": false, + "llm_generated_edge_budget": 0, + "llm_generated_task_budget": 0, + "llm_generation_parallel": true, + "llm_generation_workers": 3, + "llm_generation_retries": 2, + "allow_template_fallback_on_llm_failure": false + }, + "llm": { + "provider": "mock", + "model": "qwen3:2b", + "temperature": 0.1, + "max_tokens": 256, + "timeout_seconds": 240, + "ollama_base_url": "http://127.0.0.1:11434", + "openai_base_url": "https://api.openai.com/v1", + "openai_api_key_env": "OPENAI_API_KEY", + "openai_api_key": "" + }, + "runtime": { + "default_episodes": 20, + "leaderboard_path": "artifacts/leaderboard_metaqa.json", + "dashboard_path": "artifacts/metaqa_dashboard.html", + "sweep_dashboard_dir": "artifacts/metaqa_sweep_dashboards" + } +} diff --git a/datasets/fixed_levels/README.md b/datasets/fixed_levels/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3c7e2b60d8e200f697826454595e5469c242098a --- /dev/null +++ b/datasets/fixed_levels/README.md @@ -0,0 +1,64 @@ +# Fixed Levels Submission Dataset + +This folder contains a fixed three-level OSINT benchmark set built on one shared base graph. + +## Files + +- `seed_fixed_levels.json`: master fixed seed with an expanded canonical graph and 30 fixed questions. +- `fixed_graph_questions.json`: extracted fixed dataset snapshot for submission packaging. +- `shared_config_fixed_levels.json`: run config used for generation and evaluation. +- `complete_dataset_qwen_generated.json`: full dataset after Qwen (`qwen3:2b` via Ollama) expands the graph. +- `qwen_swarm_eval_fixed_levels.json`: legacy Qwen swarm evaluation summary from the older smaller version of the set. +- `qwen_swarm_benchmark_fixed_levels.json`: legacy benchmark output from the older smaller version of the set. +- `leaderboard_fixed_levels.json`: leaderboard file for this dataset. 
+- `dashboard_fixed_levels.html`: interactive dashboard generated from the benchmark run. + +## Difficulty Design + +- Easy: 10 questions. These now use the older hard-style multi-hop traces as the new floor. +- Mid: 10 questions. Each question spans roughly 15-20 supporting nodes. +- High: 10 questions. Each question spans roughly 50 supporting nodes. + +All 30 questions are fixed and share the same larger seeded graph. + +## Regenerate Artifacts + +```bash +source ~/arl/bin/activate +cd /home/ritish/test1 +PYTHONPATH=src python scripts/build_fixed_levels_dataset.py \ + --seed-file datasets/fixed_levels/seed_fixed_levels.json \ + --shared-config datasets/fixed_levels/shared_config_fixed_levels.json \ + --output-dir datasets/fixed_levels +``` + +## Evaluate Qwen Swarm + +```bash +source ~/arl/bin/activate +cd /home/ritish/test1 +PYTHONPATH=src osint-env eval \ + --config datasets/fixed_levels/shared_config_fixed_levels.json \ + --seed-file datasets/fixed_levels/seed_fixed_levels.json \ + --agent-mode swarm \ + --llm-provider ollama \ + --llm-model qwen3:2b \ + --episodes 15 +``` + +## Benchmark + Dashboard + +```bash +source ~/arl/bin/activate +cd /home/ritish/test1 +PYTHONPATH=src osint-env benchmark \ + --config datasets/fixed_levels/shared_config_fixed_levels.json \ + --seed-file datasets/fixed_levels/seed_fixed_levels.json \ + --agent-mode swarm \ + --llm-provider ollama \ + --llm-model qwen3:2b \ + --episodes 15 \ + --name fixed_levels_qwen_swarm \ + --leaderboard datasets/fixed_levels/leaderboard_fixed_levels.json \ + --dashboard datasets/fixed_levels/dashboard_fixed_levels.html +``` diff --git a/datasets/fixed_levels/complete_dataset_qwen_generated.json b/datasets/fixed_levels/complete_dataset_qwen_generated.json new file mode 100644 index 0000000000000000000000000000000000000000..813cd92dfe5a5866cafac070a42dbec18a4f2cb0 --- /dev/null +++ b/datasets/fixed_levels/complete_dataset_qwen_generated.json @@ -0,0 +1,8798 @@ +{ + "canonical_graph": { + "edge_count": 
249, + "edges": [ + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_0" + }, + { + "confidence": 1.0, + "dst": "loc_hyderabad", + "rel": "located_in", + "src": "user_0" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_1" + }, + { + "confidence": 1.0, + "dst": "loc_bengaluru", + "rel": "located_in", + "src": "user_1" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_2" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_2" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_3" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_3" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_4" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_4" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_5" + }, + { + "confidence": 1.0, + "dst": "loc_bengaluru", + "rel": "located_in", + "src": "user_5" + }, + { + "confidence": 1.0, + "dst": "user_5", + "rel": "alias_of", + "src": "alias_5_936" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_6" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_6" + }, + { + "confidence": 1.0, + "dst": "user_6", + "rel": "alias_of", + "src": "alias_6_801" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_7" + }, + { + "confidence": 1.0, + "dst": "loc_hyderabad", + "rel": "located_in", + "src": "user_7" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_8" + }, + { + "confidence": 1.0, + "dst": "loc_hyderabad", + "rel": "located_in", + "src": "user_8" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": 
"works_at", + "src": "user_9" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_9" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_10" + }, + { + "confidence": 1.0, + "dst": "loc_bengaluru", + "rel": "located_in", + "src": "user_10" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_11" + }, + { + "confidence": 1.0, + "dst": "loc_hyderabad", + "rel": "located_in", + "src": "user_11" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_12" + }, + { + "confidence": 1.0, + "dst": "loc_hyderabad", + "rel": "located_in", + "src": "user_12" + }, + { + "confidence": 1.0, + "dst": "user_12", + "rel": "alias_of", + "src": "alias_12_827" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_13" + }, + { + "confidence": 1.0, + "dst": "loc_hyderabad", + "rel": "located_in", + "src": "user_13" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_14" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_14" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_15" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_15" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_16" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_16" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_17" + }, + { + "confidence": 1.0, + "dst": "loc_hyderabad", + "rel": "located_in", + "src": "user_17" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_18" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_18" + }, + { + "confidence": 
1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_19" + }, + { + "confidence": 1.0, + "dst": "loc_pune", + "rel": "located_in", + "src": "user_19" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_20" + }, + { + "confidence": 1.0, + "dst": "loc_bengaluru", + "rel": "located_in", + "src": "user_20" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_21" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_21" + }, + { + "confidence": 1.0, + "dst": "user_21", + "rel": "alias_of", + "src": "alias_21_334" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_22" + }, + { + "confidence": 1.0, + "dst": "loc_delhi", + "rel": "located_in", + "src": "user_22" + }, + { + "confidence": 1.0, + "dst": "org_northbridge", + "rel": "works_at", + "src": "user_23" + }, + { + "confidence": 1.0, + "dst": "loc_hyderabad", + "rel": "located_in", + "src": "user_23" + }, + { + "confidence": 0.8, + "dst": "user_9", + "rel": "connected_to", + "src": "user_21" + }, + { + "confidence": 0.8, + "dst": "user_20", + "rel": "connected_to", + "src": "user_17" + }, + { + "confidence": 0.8, + "dst": "user_22", + "rel": "connected_to", + "src": "user_16" + }, + { + "confidence": 0.8, + "dst": "user_12", + "rel": "connected_to", + "src": "user_15" + }, + { + "confidence": 0.8, + "dst": "user_6", + "rel": "connected_to", + "src": "user_13" + }, + { + "confidence": 0.8, + "dst": "user_8", + "rel": "connected_to", + "src": "user_9" + }, + { + "confidence": 0.8, + "dst": "user_1", + "rel": "connected_to", + "src": "user_0" + }, + { + "confidence": 0.8, + "dst": "user_20", + "rel": "connected_to", + "src": "user_4" + }, + { + "confidence": 0.8, + "dst": "user_14", + "rel": "connected_to", + "src": "user_19" + }, + { + "confidence": 0.8, + "dst": "user_15", + "rel": "connected_to", + "src": "user_16" + }, + { + 
"confidence": 0.8, + "dst": "user_6", + "rel": "connected_to", + "src": "user_11" + }, + { + "confidence": 0.8, + "dst": "user_22", + "rel": "connected_to", + "src": "user_8" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" 
+ }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_gita" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_gita" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", 
+ "rel": "located_in", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": 
"org_helios_labs" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_apex_dynamics" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_blueharbor_media" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_tidewatch_ops" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_kestrel_works" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "operates_in", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_sunmesh_analytics" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "operates_in", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.92, + "dst": "user_faris", + "rel": "connected_to", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "user_diya", + "rel": "connected_to", + "src": "user_faris" + }, + { + "confidence": 0.89, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_diya" + }, + { + "confidence": 0.87, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_elin" + }, + { + "confidence": 0.84, + "dst": "user_cyrus", + "rel": "connected_to", + "src": "user_aria" + }, + { + "confidence": 0.83, + "dst": "user_gita", + "rel": "connected_to", + "src": "user_cyrus" + }, + { + "confidence": 0.82, + "dst": "user_jules", + "rel": 
"connected_to", + "src": "user_gita" + }, + { + "confidence": 0.81, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_jules" + }, + { + "confidence": 0.9, + "dst": "user_ivy", + "rel": "connected_to", + "src": "user_diya" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.93, + "dst": "user_omar", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "user_mika", + "rel": "connected_to", + "src": "user_omar" + }, + { + "confidence": 0.89, + "dst": "user_quinn", + "rel": "connected_to", + "src": "user_mika" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.84, + "dst": "user_kian", + "rel": "connected_to", + "src": "user_tara" + }, + { + "confidence": 0.91, + "dst": "user_leena", + "rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.83, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_leena" + }, + { + "confidence": 0.82, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.8, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.79, + "dst": "user_faris", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.78, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_shift_roster", + "rel": "authored_post", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + 
"dst": "post_sat_phone_ping", + "rel": "authored_post", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "post_relay_schedule", + "rel": "authored_post", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "post_foundry_map", + "rel": "authored_post", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "post_lantern_route", + "rel": "authored_post", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + 
"src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "thr_port_audit", + "rel": 
"authored_thread", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "thr_customs_breach", + "rel": "authored_thread", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "thr_relay_map", + "rel": "authored_thread", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "thr_foundry_watch", + "rel": "authored_thread", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "thr_basin_shift", + "rel": "authored_thread", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "thr_quiet_manifest", + "rel": "authored_thread", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "thr_ember_tide_watch", + "rel": "authored_thread", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "thr_ghost_signal_net", + "rel": "authored_thread", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "discusses", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "references", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": 
"references", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "discusses", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "discusses", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "references", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_faris" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "investigates", + "src": "user_diya" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "monitors", + "src": "user_leena" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_cyrus" + }, + { + "confidence": 0.9, + "dst": 
"event_black_kite", + "rel": "investigates", + "src": "user_elin" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "reports_on", + "src": "user_jules" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_quinn" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": "user_rhea" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 0.9, + "dst": "event_silent_current", + "rel": "monitors", + "src": "user_gita" + }, + { + "confidence": 0.9, + "dst": "event_silent_current", + "rel": "reports_on", + "src": "user_jules" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.77, + "dst": "event_amber_veil", + "rel": "connected_to", + "src": "event_black_kite" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_northbridge_logistics", + "rel": "connected_to", + "src": "org_atlas_freight" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 0.77, + "dst": 
"org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + } + ], + "node_count": 118, + "nodes": [ + { + "attrs": { + "handle": "@alias_12_827" + }, + "node_id": "alias_12_827", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@alias_21_334" + }, + "node_id": "alias_21_334", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@alias_5_936" + }, + "node_id": "alias_5_936", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@alias_6_801" + }, + "node_id": "alias_6_801", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@basinraven" + }, + "node_id": "alias_basinraven", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@cinderveil" + }, + "node_id": "alias_cinderveil", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@docksparrow" + }, + "node_id": "alias_docksparrow", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@emberglass" + }, + "node_id": "alias_emberglass", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@frostledger" + }, + "node_id": "alias_frostledger", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@hollowsignal" + }, + "node_id": "alias_hollowsignal", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@ironwhisper" + }, + "node_id": "alias_ironwhisper", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@lanternmoth" + }, + "node_id": "alias_lanternmoth", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@mapleghost" + }, + "node_id": "alias_mapleghost", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@monsoonbyte" + }, + "node_id": "alias_monsoonbyte", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@nightrelay" + }, + "node_id": "alias_nightrelay", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@orchidfox" + }, + "node_id": "alias_orchidfox", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@quartzlotus" + }, + "node_id": "alias_quartzlotus", + "node_type": "alias" + }, + { + "attrs": { + 
"handle": "@sablekeel" + }, + "node_id": "alias_sablekeel", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@steelquill" + }, + "node_id": "alias_steelquill", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@tideshard" + }, + "node_id": "alias_tideshard", + "node_type": "alias" + }, + { + "attrs": { + "name": "Amber Veil" + }, + "node_id": "event_amber_veil", + "node_type": "event" + }, + { + "attrs": { + "name": "Black Kite" + }, + "node_id": "event_black_kite", + "node_type": "event" + }, + { + "attrs": { + "name": "Ember Tide" + }, + "node_id": "event_ember_tide", + "node_type": "event" + }, + { + "attrs": { + "name": "Ghost Signal" + }, + "node_id": "event_ghost_signal", + "node_type": "event" + }, + { + "attrs": { + "name": "Glass Harbor" + }, + "node_id": "event_glass_harbor", + "node_type": "event" + }, + { + "attrs": { + "name": "Iron Wharf" + }, + "node_id": "event_iron_wharf", + "node_type": "event" + }, + { + "attrs": { + "name": "Project Lantern" + }, + "node_id": "event_project_lantern", + "node_type": "event" + }, + { + "attrs": { + "name": "Silent Current" + }, + "node_id": "event_silent_current", + "node_type": "event" + }, + { + "attrs": { + "name": "Bengaluru" + }, + "node_id": "loc_bengaluru", + "node_type": "location" + }, + { + "attrs": { + "name": "Delhi" + }, + "node_id": "loc_delhi", + "node_type": "location" + }, + { + "attrs": { + "name": "Dockyard 17" + }, + "node_id": "loc_dockyard17", + "node_type": "location" + }, + { + "attrs": { + "name": "East Quay" + }, + "node_id": "loc_east_quay", + "node_type": "location" + }, + { + "attrs": { + "name": "Foundry Row" + }, + "node_id": "loc_foundry_row", + "node_type": "location" + }, + { + "attrs": { + "name": "Hyderabad" + }, + "node_id": "loc_hyderabad", + "node_type": "location" + }, + { + "attrs": { + "name": "North Basin" + }, + "node_id": "loc_north_basin", + "node_type": "location" + }, + { + "attrs": { + "name": "Old Town" + }, + "node_id": "loc_old_town", + 
"node_type": "location" + }, + { + "attrs": { + "name": "Pune" + }, + "node_id": "loc_pune", + "node_type": "location" + }, + { + "attrs": { + "name": "Rivergate" + }, + "node_id": "loc_rivergate", + "node_type": "location" + }, + { + "attrs": { + "name": "Sector 9" + }, + "node_id": "loc_sector9", + "node_type": "location" + }, + { + "attrs": { + "name": "Uplink Yard" + }, + "node_id": "loc_uplink_yard", + "node_type": "location" + }, + { + "attrs": { + "name": "Apex Dynamics" + }, + "node_id": "org_apex_dynamics", + "node_type": "org" + }, + { + "attrs": { + "name": "Atlas Freight" + }, + "node_id": "org_atlas_freight", + "node_type": "org" + }, + { + "attrs": { + "name": "Blueharbor Media" + }, + "node_id": "org_blueharbor_media", + "node_type": "org" + }, + { + "attrs": { + "name": "Emberline Security" + }, + "node_id": "org_emberline_security", + "node_type": "org" + }, + { + "attrs": { + "name": "Harborlight Transit" + }, + "node_id": "org_harborlight_transit", + "node_type": "org" + }, + { + "attrs": { + "name": "Helios Labs" + }, + "node_id": "org_helios_labs", + "node_type": "org" + }, + { + "attrs": { + "name": "Kestrel Works" + }, + "node_id": "org_kestrel_works", + "node_type": "org" + }, + { + "attrs": { + "name": "Northbridge" + }, + "node_id": "org_northbridge", + "node_type": "org" + }, + { + "attrs": { + "name": "Northbridge Logistics" + }, + "node_id": "org_northbridge_logistics", + "node_type": "org" + }, + { + "attrs": { + "name": "Orion Customs" + }, + "node_id": "org_orion_customs", + "node_type": "org" + }, + { + "attrs": { + "name": "Sunmesh Analytics" + }, + "node_id": "org_sunmesh_analytics", + "node_type": "org" + }, + { + "attrs": { + "name": "Tidewatch Ops" + }, + "node_id": "org_tidewatch_ops", + "node_type": "org" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_basin_photo", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_customs_tag", + "node_type": "post" + }, + { + 
"attrs": { + "channel": "microblog" + }, + "node_id": "post_drone_parts", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_foundry_map", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_hull_signal", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_lantern_route", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_midnight_manifest", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_quay_ledgers", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_relay_schedule", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_sat_phone_ping", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_shift_roster", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_uplink_note", + "node_type": "post" + }, + { + "attrs": { + "topic": "basin_shift" + }, + "node_id": "thr_basin_shift", + "node_type": "thread" + }, + { + "attrs": { + "topic": "customs_breach" + }, + "node_id": "thr_customs_breach", + "node_type": "thread" + }, + { + "attrs": { + "topic": "ember_tide" + }, + "node_id": "thr_ember_tide_watch", + "node_type": "thread" + }, + { + "attrs": { + "topic": "foundry_watch" + }, + "node_id": "thr_foundry_watch", + "node_type": "thread" + }, + { + "attrs": { + "topic": "ghost_signal" + }, + "node_id": "thr_ghost_signal_net", + "node_type": "thread" + }, + { + "attrs": { + "topic": "port_audit" + }, + "node_id": "thr_port_audit", + "node_type": "thread" + }, + { + "attrs": { + "topic": "quiet_manifest" + }, + "node_id": "thr_quiet_manifest", + "node_type": "thread" + }, + { + "attrs": { + "topic": "relay_map" + }, + "node_id": "thr_relay_map", + "node_type": "thread" + }, + { + "attrs": { + "topic": 
"supply_chain" + }, + "node_id": "thr_supply_leak", + "node_type": "thread" + }, + { + "attrs": { + "topic": "uplink_route" + }, + "node_id": "thr_uplink_route", + "node_type": "thread" + }, + { + "attrs": { + "location": "Hyderabad", + "name": "Person 0", + "org": "Apex Dynamics" + }, + "node_id": "user_0", + "node_type": "user" + }, + { + "attrs": { + "location": "Bengaluru", + "name": "Person 1", + "org": "Northbridge" + }, + "node_id": "user_1", + "node_type": "user" + }, + { + "attrs": { + "location": "Bengaluru", + "name": "Person 10", + "org": "Northbridge" + }, + "node_id": "user_10", + "node_type": "user" + }, + { + "attrs": { + "location": "Hyderabad", + "name": "Person 11", + "org": "Apex Dynamics" + }, + "node_id": "user_11", + "node_type": "user" + }, + { + "attrs": { + "location": "Hyderabad", + "name": "Person 12", + "org": "Apex Dynamics" + }, + "node_id": "user_12", + "node_type": "user" + }, + { + "attrs": { + "location": "Hyderabad", + "name": "Person 13", + "org": "Helios Labs" + }, + "node_id": "user_13", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 14", + "org": "Apex Dynamics" + }, + "node_id": "user_14", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 15", + "org": "Northbridge" + }, + "node_id": "user_15", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 16", + "org": "Helios Labs" + }, + "node_id": "user_16", + "node_type": "user" + }, + { + "attrs": { + "location": "Hyderabad", + "name": "Person 17", + "org": "Northbridge" + }, + "node_id": "user_17", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 18", + "org": "Northbridge" + }, + "node_id": "user_18", + "node_type": "user" + }, + { + "attrs": { + "location": "Pune", + "name": "Person 19", + "org": "Apex Dynamics" + }, + "node_id": "user_19", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 2", + "org": 
"Northbridge" + }, + "node_id": "user_2", + "node_type": "user" + }, + { + "attrs": { + "location": "Bengaluru", + "name": "Person 20", + "org": "Apex Dynamics" + }, + "node_id": "user_20", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 21", + "org": "Helios Labs" + }, + "node_id": "user_21", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 22", + "org": "Apex Dynamics" + }, + "node_id": "user_22", + "node_type": "user" + }, + { + "attrs": { + "location": "Hyderabad", + "name": "Person 23", + "org": "Northbridge" + }, + "node_id": "user_23", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 3", + "org": "Northbridge" + }, + "node_id": "user_3", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 4", + "org": "Northbridge" + }, + "node_id": "user_4", + "node_type": "user" + }, + { + "attrs": { + "location": "Bengaluru", + "name": "Person 5", + "org": "Northbridge" + }, + "node_id": "user_5", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 6", + "org": "Apex Dynamics" + }, + "node_id": "user_6", + "node_type": "user" + }, + { + "attrs": { + "location": "Hyderabad", + "name": "Person 7", + "org": "Helios Labs" + }, + "node_id": "user_7", + "node_type": "user" + }, + { + "attrs": { + "location": "Hyderabad", + "name": "Person 8", + "org": "Helios Labs" + }, + "node_id": "user_8", + "node_type": "user" + }, + { + "attrs": { + "location": "Delhi", + "name": "Person 9", + "org": "Helios Labs" + }, + "node_id": "user_9", + "node_type": "user" + }, + { + "attrs": { + "location": "Sector 9", + "name": "Aria Sen", + "org": "Helios Labs" + }, + "node_id": "user_aria", + "node_type": "user" + }, + { + "attrs": { + "location": "Dockyard 17", + "name": "Bharat Kulkarni", + "org": "Northbridge Logistics" + }, + "node_id": "user_bharat", + "node_type": "user" + }, + { + "attrs": { + "location": "Old Town", 
+ "name": "Cyrus Mehta", + "org": "Apex Dynamics" + }, + "node_id": "user_cyrus", + "node_type": "user" + }, + { + "attrs": { + "location": "Old Town", + "name": "Diya Roy", + "org": "Blueharbor Media" + }, + "node_id": "user_diya", + "node_type": "user" + }, + { + "attrs": { + "location": "Sector 9", + "name": "Elin Das", + "org": "Helios Labs" + }, + "node_id": "user_elin", + "node_type": "user" + }, + { + "attrs": { + "location": "Rivergate", + "name": "Faris Noor", + "org": "Tidewatch Ops" + }, + "node_id": "user_faris", + "node_type": "user" + }, + { + "attrs": { + "location": "Old Town", + "name": "Gita Pradhan", + "org": "Apex Dynamics" + }, + "node_id": "user_gita", + "node_type": "user" + }, + { + "attrs": { + "location": "Dockyard 17", + "name": "Hiro Tan", + "org": "Northbridge Logistics" + }, + "node_id": "user_hiro", + "node_type": "user" + }, + { + "attrs": { + "location": "Rivergate", + "name": "Ivy Kapoor", + "org": "Kestrel Works" + }, + "node_id": "user_ivy", + "node_type": "user" + }, + { + "attrs": { + "location": "Old Town", + "name": "Jules Banerjee", + "org": "Blueharbor Media" + }, + "node_id": "user_jules", + "node_type": "user" + }, + { + "attrs": { + "location": "East Quay", + "name": "Kian Bose", + "org": "Atlas Freight" + }, + "node_id": "user_kian", + "node_type": "user" + }, + { + "attrs": { + "location": "Sector 9", + "name": "Leena Das", + "org": "Sunmesh Analytics" + }, + "node_id": "user_leena", + "node_type": "user" + }, + { + "attrs": { + "location": "North Basin", + "name": "Mika Solanki", + "org": "Orion Customs" + }, + "node_id": "user_mika", + "node_type": "user" + }, + { + "attrs": { + "location": "Foundry Row", + "name": "Nora Iqbal", + "org": "Emberline Security" + }, + "node_id": "user_nora", + "node_type": "user" + }, + { + "attrs": { + "location": "East Quay", + "name": "Omar Sheikh", + "org": "Atlas Freight" + }, + "node_id": "user_omar", + "node_type": "user" + }, + { + "attrs": { + "location": "Sector 9", + "name": 
"Priya Menon", + "org": "Sunmesh Analytics" + }, + "node_id": "user_priya", + "node_type": "user" + }, + { + "attrs": { + "location": "North Basin", + "name": "Quinn Rao", + "org": "Orion Customs" + }, + "node_id": "user_quinn", + "node_type": "user" + }, + { + "attrs": { + "location": "Foundry Row", + "name": "Rhea Kapoor", + "org": "Emberline Security" + }, + "node_id": "user_rhea", + "node_type": "user" + }, + { + "attrs": { + "location": "Uplink Yard", + "name": "Soren Malik", + "org": "Harborlight Transit" + }, + "node_id": "user_soren", + "node_type": "user" + }, + { + "attrs": { + "location": "Uplink Yard", + "name": "Tara Dey", + "org": "Harborlight Transit" + }, + "node_id": "user_tara", + "node_type": "user" + } + ] + }, + "dataset_name": "fixed_levels_submission_set", + "difficulty_counts": { + "easy": 10, + "high": 10, + "mid": 10 + }, + "environment": { + "alias_density": 0.2, + "n_users": 24, + "noise_level": 0.12, + "red_herring_rate": 0.08, + "seed": 2026 + }, + "generation_mode": "llm_expanded", + "llm": { + "max_tokens": 384, + "model": "qwen3:2b", + "ollama_base_url": "http://127.0.0.1:11434", + "openai_api_key": "", + "openai_api_key_env": "OPENAI_API_KEY", + "openai_base_url": "https://api.openai.com/v1", + "provider": "ollama", + "temperature": 0.05, + "timeout_seconds": 240 + }, + "platform_views": { + "counts": { + "forum_threads": 8, + "microblog_posts": 44, + "profiles": 47 + }, + "forum_threads": [ + { + "author_id": "user_17", + "comments": [ + { + "text": "Following this.", + "user_id": "user_6" + }, + { + "text": "Interesting link.", + "user_id": "user_16" + } + ], + "thread_id": "thr_0", + "topic": "startup" + }, + { + "author_id": "user_6", + "comments": [ + { + "text": "Following this.", + "user_id": "user_10" + }, + { + "text": "Interesting link.", + "user_id": "user_6" + } + ], + "thread_id": "thr_1", + "topic": "infra" + }, + { + "author_id": "user_tara", + "comments": [ + { + "text": "Following this.", + "user_id": "user_22" + 
}, + { + "text": "Interesting link.", + "user_id": "user_11" + } + ], + "thread_id": "thr_2", + "topic": "security" + }, + { + "author_id": "user_rhea", + "comments": [ + { + "text": "Following this.", + "user_id": "user_ivy" + }, + { + "text": "Interesting link.", + "user_id": "user_20" + } + ], + "thread_id": "thr_3", + "topic": "security" + }, + { + "author_id": "user_17", + "comments": [ + { + "text": "Following this.", + "user_id": "user_18" + }, + { + "text": "Interesting link.", + "user_id": "user_20" + } + ], + "thread_id": "thr_4", + "topic": "security" + }, + { + "author_id": "user_10", + "comments": [ + { + "text": "Following this.", + "user_id": "user_16" + }, + { + "text": "Interesting link.", + "user_id": "user_4" + } + ], + "thread_id": "thr_5", + "topic": "ai" + }, + { + "author_id": "user_0", + "comments": [ + { + "text": "Following this.", + "user_id": "user_16" + }, + { + "text": "Interesting link.", + "user_id": "user_cyrus" + } + ], + "thread_id": "thr_6", + "topic": "infra" + }, + { + "author_id": "user_0", + "comments": [ + { + "text": "Following this.", + "user_id": "user_6" + }, + { + "text": "Interesting link.", + "user_id": "user_15" + } + ], + "thread_id": "thr_7", + "topic": "security" + } + ], + "microblog_posts": [ + { + "canonical_user": "user_0", + "mentions": [ + "user_15" + ], + "post_id": "post_0", + "text": "Update 0 from Apex Dynamics #hyderabad", + "timestamp": 1000, + "user_id": "user_0" + }, + { + "canonical_user": "user_1", + "mentions": [ + "user_1" + ], + "post_id": "post_1", + "text": "Update 1 from Northbridge #bengaluru", + "timestamp": 1001, + "user_id": "user_1" + }, + { + "canonical_user": "user_2", + "mentions": [ + "user_3" + ], + "post_id": "post_2", + "text": "Update 2 from Northbridge #delhi", + "timestamp": 1002, + "user_id": "user_2" + }, + { + "canonical_user": "user_rhea", + "mentions": [ + "user_5" + ], + "post_id": "post_3", + "text": "Update 3 from Northbridge #delhi", + "timestamp": 1003, + "user_id": 
"alias_cinderveil" + }, + { + "canonical_user": "user_4", + "mentions": [ + "user_15" + ], + "post_id": "post_4", + "text": "Update 4 from Northbridge #delhi", + "timestamp": 1004, + "user_id": "user_4" + }, + { + "canonical_user": "user_5", + "mentions": [ + "user_15" + ], + "post_id": "post_5", + "text": "Update 5 from Northbridge #bengaluru", + "timestamp": 1005, + "user_id": "user_5" + }, + { + "canonical_user": "user_diya", + "mentions": [ + "user_13" + ], + "post_id": "post_6", + "text": "Update 6 from Apex Dynamics #delhi", + "timestamp": 1006, + "user_id": "alias_monsoonbyte" + }, + { + "canonical_user": "user_5", + "mentions": [ + "user_14" + ], + "post_id": "post_7", + "text": "Rumor: Update 7 from Helios Labs #hyderabad maybe fake", + "timestamp": 1007, + "user_id": "alias_5_936" + }, + { + "canonical_user": "user_8", + "mentions": [ + "user_2" + ], + "post_id": "post_8", + "text": "Update 8 from Helios Labs #hyderabad", + "timestamp": 1008, + "user_id": "user_8" + }, + { + "canonical_user": "user_9", + "mentions": [ + "user_3" + ], + "post_id": "post_9", + "text": "Update 9 from Helios Labs #delhi", + "timestamp": 1009, + "user_id": "user_9" + }, + { + "canonical_user": "user_10", + "mentions": [ + "user_8" + ], + "post_id": "post_10", + "text": "Update 10 from Northbridge #bengaluru", + "timestamp": 1010, + "user_id": "user_10" + }, + { + "canonical_user": "user_11", + "mentions": [ + "user_4" + ], + "post_id": "post_11", + "text": "Update 11 from Apex Dynamics #hyderabad", + "timestamp": 1011, + "user_id": "user_11" + }, + { + "canonical_user": "user_kian", + "mentions": [ + "user_15" + ], + "post_id": "post_12", + "text": "Update 12 from Apex Dynamics #hyderabad", + "timestamp": 1012, + "user_id": "alias_lanternmoth" + }, + { + "canonical_user": "user_13", + "mentions": [ + "user_17" + ], + "post_id": "post_13", + "text": "Update 13 from Helios Labs #hyderabad", + "timestamp": 1013, + "user_id": "user_13" + }, + { + "canonical_user": "user_kian", + 
"mentions": [ + "user_19" + ], + "post_id": "post_14", + "text": "Update 14 from Apex Dynamics #delhi", + "timestamp": 1014, + "user_id": "alias_lanternmoth" + }, + { + "canonical_user": "user_15", + "mentions": [ + "user_2" + ], + "post_id": "post_15", + "text": "Update 15 from Northbridge #delhi", + "timestamp": 1015, + "user_id": "user_15" + }, + { + "canonical_user": "user_bharat", + "mentions": [ + "user_18" + ], + "post_id": "post_16", + "text": "Update 16 from Helios Labs #delhi", + "timestamp": 1016, + "user_id": "alias_steelquill" + }, + { + "canonical_user": "user_soren", + "mentions": [ + "user_4" + ], + "post_id": "post_17", + "text": "Update 17 from Northbridge #hyderabad", + "timestamp": 1017, + "user_id": "alias_tideshard" + }, + { + "canonical_user": "user_18", + "mentions": [ + "user_0" + ], + "post_id": "post_18", + "text": "Update 18 from Northbridge #delhi", + "timestamp": 1018, + "user_id": "user_18" + }, + { + "canonical_user": "user_19", + "mentions": [ + "user_2" + ], + "post_id": "post_19", + "text": "Update 19 from Apex Dynamics #pune", + "timestamp": 1019, + "user_id": "user_19" + }, + { + "canonical_user": "user_nora", + "mentions": [ + "user_0" + ], + "post_id": "post_20", + "text": "Update 20 from Apex Dynamics #bengaluru", + "timestamp": 1020, + "user_id": "alias_emberglass" + }, + { + "canonical_user": "user_omar", + "mentions": [ + "user_9" + ], + "post_id": "post_21", + "text": "Update 21 from Helios Labs #delhi", + "timestamp": 1021, + "user_id": "alias_ironwhisper" + }, + { + "canonical_user": "user_22", + "mentions": [ + "user_15" + ], + "post_id": "post_22", + "text": "Update 22 from Apex Dynamics #delhi", + "timestamp": 1022, + "user_id": "user_22" + }, + { + "canonical_user": "user_23", + "mentions": [ + "user_5" + ], + "post_id": "post_23", + "text": "Update 23 from Northbridge #hyderabad", + "timestamp": 1023, + "user_id": "user_23" + }, + { + "canonical_user": "user_rhea", + "mentions": [ + "user_19" + ], + "post_id": 
"post_24", + "text": "Update 24 from Helios Labs #sector 9", + "timestamp": 1024, + "user_id": "alias_cinderveil" + }, + { + "canonical_user": "user_leena", + "mentions": [ + "user_22" + ], + "post_id": "post_25", + "text": "Update 25 from Northbridge Logistics #dockyard 17", + "timestamp": 1025, + "user_id": "alias_frostledger" + }, + { + "canonical_user": "user_mika", + "mentions": [ + "user_20" + ], + "post_id": "post_26", + "text": "Rumor: Update 26 from Apex Dynamics #old town maybe fake", + "timestamp": 1026, + "user_id": "alias_basinraven" + }, + { + "canonical_user": "user_diya", + "mentions": [ + "user_17" + ], + "post_id": "post_27", + "text": "Rumor: Update 27 from Blueharbor Media #old town maybe fake", + "timestamp": 1027, + "user_id": "user_diya" + }, + { + "canonical_user": "user_elin", + "mentions": [ + "user_20" + ], + "post_id": "post_28", + "text": "Rumor: Update 28 from Helios Labs #sector 9 maybe fake", + "timestamp": 1028, + "user_id": "user_elin" + }, + { + "canonical_user": "user_5", + "mentions": [ + "user_16" + ], + "post_id": "post_29", + "text": "Update 29 from Tidewatch Ops #rivergate", + "timestamp": 1029, + "user_id": "alias_5_936" + }, + { + "canonical_user": "user_rhea", + "mentions": [ + "user_4" + ], + "post_id": "post_30", + "text": "Update 30 from Apex Dynamics #old town", + "timestamp": 1030, + "user_id": "alias_cinderveil" + }, + { + "canonical_user": "user_bharat", + "mentions": [ + "user_13" + ], + "post_id": "post_31", + "text": "Update 31 from Northbridge Logistics #dockyard 17", + "timestamp": 1031, + "user_id": "alias_steelquill" + }, + { + "canonical_user": "user_ivy", + "mentions": [ + "user_12" + ], + "post_id": "post_32", + "text": "Update 32 from Kestrel Works #rivergate", + "timestamp": 1032, + "user_id": "user_ivy" + }, + { + "canonical_user": "user_nora", + "mentions": [ + "user_14" + ], + "post_id": "post_33", + "text": "Update 33 from Blueharbor Media #old town", + "timestamp": 1033, + "user_id": 
"alias_emberglass" + }, + { + "canonical_user": "user_kian", + "mentions": [ + "user_13" + ], + "post_id": "post_34", + "text": "Update 34 from Atlas Freight #east quay", + "timestamp": 1034, + "user_id": "user_kian" + }, + { + "canonical_user": "user_bharat", + "mentions": [ + "user_15" + ], + "post_id": "post_35", + "text": "Update 35 from Sunmesh Analytics #sector 9", + "timestamp": 1035, + "user_id": "alias_steelquill" + }, + { + "canonical_user": "user_rhea", + "mentions": [ + "user_8" + ], + "post_id": "post_36", + "text": "Update 36 from Orion Customs #north basin", + "timestamp": 1036, + "user_id": "alias_cinderveil" + }, + { + "canonical_user": "user_nora", + "mentions": [ + "user_7" + ], + "post_id": "post_37", + "text": "Update 37 from Emberline Security #foundry row", + "timestamp": 1037, + "user_id": "user_nora" + }, + { + "canonical_user": "user_12", + "mentions": [ + "user_20" + ], + "post_id": "post_38", + "text": "Update 38 from Atlas Freight #east quay", + "timestamp": 1038, + "user_id": "alias_12_827" + }, + { + "canonical_user": "user_tara", + "mentions": [ + "user_21" + ], + "post_id": "post_39", + "text": "Update 39 from Sunmesh Analytics #sector 9", + "timestamp": 1039, + "user_id": "alias_sablekeel" + }, + { + "canonical_user": "user_5", + "mentions": [ + "user_5" + ], + "post_id": "post_40", + "text": "Update 40 from Orion Customs #north basin", + "timestamp": 1040, + "user_id": "alias_5_936" + }, + { + "canonical_user": "user_kian", + "mentions": [ + "user_6" + ], + "post_id": "post_41", + "text": "Update 41 from Emberline Security #foundry row", + "timestamp": 1041, + "user_id": "alias_lanternmoth" + }, + { + "canonical_user": "user_elin", + "mentions": [ + "user_18" + ], + "post_id": "post_42", + "text": "Update 42 from Harborlight Transit #uplink yard", + "timestamp": 1042, + "user_id": "alias_mapleghost" + }, + { + "canonical_user": "user_tara", + "mentions": [ + "user_18" + ], + "post_id": "post_43", + "text": "Update 43 from 
Harborlight Transit #uplink yard", + "timestamp": 1043, + "user_id": "user_tara" + } + ], + "profiles": [ + { + "connections": [ + "user_1" + ], + "location": "Hyderabad", + "name": "Person 0", + "org": "Apex Dynamics", + "user_id": "user_0", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [], + "location": "Bengaluru", + "name": "Person 1", + "org": "Northbridge", + "user_id": "user_1", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [], + "location": "Delhi", + "name": "Person 2", + "org": "Northbridge", + "user_id": "user_2", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [], + "location": "Delhi", + "name": "Person 3", + "org": "Northbridge", + "user_id": "user_3", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [ + "user_20" + ], + "location": "Delhi", + "name": "Person 4", + "org": "Northbridge", + "user_id": "user_4", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [], + "location": "Bengaluru", + "name": "Person 5", + "org": "Northbridge", + "user_id": "user_5", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [], + "location": "Delhi", + "name": "Person 6", + "org": "Apex Dynamics", + "user_id": "user_6", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [], + "location": "Hyderabad", + "name": "Person 7", + "org": "Helios Labs", + "user_id": "user_7", + "work_history": [ + "Helios Labs" + ] + }, + { + "connections": [ + "user_22" + ], + "location": "Hyderabad", + "name": "Person 8", + "org": "Helios Labs", + "user_id": "user_8", + "work_history": [ + "Helios Labs" + ] + }, + { + "connections": [ + "user_8" + ], + "location": "Delhi", + "name": "Person 9", + "org": "Helios Labs", + "user_id": "user_9", + "work_history": [ + "Helios Labs" + ] + }, + { + "connections": [], + "location": "Bengaluru", + "name": "Person 10", + "org": "Northbridge", + "user_id": "user_10", + "work_history": [ + "Northbridge" + ] + }, + { + 
"connections": [ + "user_6" + ], + "location": "Hyderabad", + "name": "Person 11", + "org": "Apex Dynamics", + "user_id": "user_11", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [], + "location": "Hyderabad", + "name": "Person 12", + "org": "Apex Dynamics", + "user_id": "user_12", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [ + "user_6" + ], + "location": "Hyderabad", + "name": "Person 13", + "org": "Helios Labs", + "user_id": "user_13", + "work_history": [ + "Helios Labs" + ] + }, + { + "connections": [], + "location": "Delhi", + "name": "Person 14", + "org": "Apex Dynamics", + "user_id": "user_14", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [ + "user_12" + ], + "location": "Delhi", + "name": "Person 15", + "org": "Northbridge", + "user_id": "user_15", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [ + "user_22", + "user_15" + ], + "location": "Delhi", + "name": "Person 16", + "org": "Helios Labs", + "user_id": "user_16", + "work_history": [ + "Helios Labs" + ] + }, + { + "connections": [ + "user_20" + ], + "location": "Hyderabad", + "name": "Person 17", + "org": "Northbridge", + "user_id": "user_17", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [], + "location": "Delhi", + "name": "Person 18", + "org": "Northbridge", + "user_id": "user_18", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [ + "user_14" + ], + "location": "Pune", + "name": "Person 19", + "org": "Apex Dynamics", + "user_id": "user_19", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [], + "location": "Bengaluru", + "name": "Person 20", + "org": "Apex Dynamics", + "user_id": "user_20", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [ + "user_9" + ], + "location": "Delhi", + "name": "Person 21", + "org": "Helios Labs", + "user_id": "user_21", + "work_history": [ + "Helios Labs" + ] + }, + { + "connections": [], + "location": "Delhi", 
+ "name": "Person 22", + "org": "Apex Dynamics", + "user_id": "user_22", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [], + "location": "Hyderabad", + "name": "Person 23", + "org": "Northbridge", + "user_id": "user_23", + "work_history": [ + "Northbridge" + ] + }, + { + "connections": [ + "user_cyrus" + ], + "location": "Sector 9", + "name": "Aria Sen", + "org": "Helios Labs", + "user_id": "user_aria", + "work_history": [ + "Helios Labs" + ] + }, + { + "connections": [ + "user_hiro" + ], + "location": "Dockyard 17", + "name": "Bharat Kulkarni", + "org": "Northbridge Logistics", + "user_id": "user_bharat", + "work_history": [ + "Northbridge Logistics" + ] + }, + { + "connections": [ + "user_gita" + ], + "location": "Old Town", + "name": "Cyrus Mehta", + "org": "Apex Dynamics", + "user_id": "user_cyrus", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [ + "user_elin", + "user_ivy" + ], + "location": "Old Town", + "name": "Diya Roy", + "org": "Blueharbor Media", + "user_id": "user_diya", + "work_history": [ + "Blueharbor Media" + ] + }, + { + "connections": [ + "user_aria" + ], + "location": "Sector 9", + "name": "Elin Das", + "org": "Helios Labs", + "user_id": "user_elin", + "work_history": [ + "Helios Labs" + ] + }, + { + "connections": [ + "user_diya" + ], + "location": "Rivergate", + "name": "Faris Noor", + "org": "Tidewatch Ops", + "user_id": "user_faris", + "work_history": [ + "Tidewatch Ops" + ] + }, + { + "connections": [ + "user_jules" + ], + "location": "Old Town", + "name": "Gita Pradhan", + "org": "Apex Dynamics", + "user_id": "user_gita", + "work_history": [ + "Apex Dynamics" + ] + }, + { + "connections": [ + "user_faris" + ], + "location": "Dockyard 17", + "name": "Hiro Tan", + "org": "Northbridge Logistics", + "user_id": "user_hiro", + "work_history": [ + "Northbridge Logistics" + ] + }, + { + "connections": [ + "user_bharat", + "user_elin" + ], + "location": "Rivergate", + "name": "Ivy Kapoor", + "org": 
"Kestrel Works", + "user_id": "user_ivy", + "work_history": [ + "Kestrel Works" + ] + }, + { + "connections": [ + "user_bharat" + ], + "location": "Old Town", + "name": "Jules Banerjee", + "org": "Blueharbor Media", + "user_id": "user_jules", + "work_history": [ + "Blueharbor Media" + ] + }, + { + "connections": [ + "user_omar", + "user_bharat" + ], + "location": "East Quay", + "name": "Kian Bose", + "org": "Atlas Freight", + "user_id": "user_kian", + "work_history": [ + "Atlas Freight" + ] + }, + { + "connections": [ + "user_aria" + ], + "location": "Sector 9", + "name": "Leena Das", + "org": "Sunmesh Analytics", + "user_id": "user_leena", + "work_history": [ + "Sunmesh Analytics" + ] + }, + { + "connections": [ + "user_quinn" + ], + "location": "North Basin", + "name": "Mika Solanki", + "org": "Orion Customs", + "user_id": "user_mika", + "work_history": [ + "Orion Customs" + ] + }, + { + "connections": [ + "user_rhea" + ], + "location": "Foundry Row", + "name": "Nora Iqbal", + "org": "Emberline Security", + "user_id": "user_nora", + "work_history": [ + "Emberline Security" + ] + }, + { + "connections": [ + "user_mika" + ], + "location": "East Quay", + "name": "Omar Sheikh", + "org": "Atlas Freight", + "user_id": "user_omar", + "work_history": [ + "Atlas Freight" + ] + }, + { + "connections": [ + "user_leena", + "user_nora" + ], + "location": "Sector 9", + "name": "Priya Menon", + "org": "Sunmesh Analytics", + "user_id": "user_priya", + "work_history": [ + "Sunmesh Analytics" + ] + }, + { + "connections": [ + "user_nora", + "user_hiro" + ], + "location": "North Basin", + "name": "Quinn Rao", + "org": "Orion Customs", + "user_id": "user_quinn", + "work_history": [ + "Orion Customs" + ] + }, + { + "connections": [ + "user_soren" + ], + "location": "Foundry Row", + "name": "Rhea Kapoor", + "org": "Emberline Security", + "user_id": "user_rhea", + "work_history": [ + "Emberline Security" + ] + }, + { + "connections": [ + "user_tara", + "user_faris" + ], + "location": 
"Uplink Yard", + "name": "Soren Malik", + "org": "Harborlight Transit", + "user_id": "user_soren", + "work_history": [ + "Harborlight Transit" + ] + }, + { + "connections": [ + "user_kian" + ], + "location": "Uplink Yard", + "name": "Tara Dey", + "org": "Harborlight Transit", + "user_id": "user_tara", + "work_history": [ + "Harborlight Transit" + ] + }, + { + "connections": [], + "location": "Remote", + "name": "P123", + "org": "Unknown Ventures", + "user_id": "noise_0", + "work_history": [] + }, + { + "connections": [], + "location": "Unknown", + "name": "P196", + "org": "Unknown Ventures", + "user_id": "noise_1", + "work_history": [] + }, + { + "connections": [], + "location": "Remote", + "name": "P898", + "org": "Unknown Ventures", + "user_id": "noise_2", + "work_history": [] + } + ] + }, + "seed_file": "datasets\\fixed_levels\\seed_fixed_levels.json", + "shared_config": "datasets\\fixed_levels\\shared_config_fixed_levels.json", + "task_count": 30, + "tasks": [ + { + "answer": "user_bharat", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_01", + "support_nodes": 6 + }, + "question": "alias_orchidfox -> post_midnight_manifest -> loc_dockyard17 -> connected collaborator on event_project_lantern. 
Who is it?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + } + ], + "task_id": "seed_task_0", + "task_type": "fixed_trace" + }, + { + "answer": "user_hiro", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_02", + "support_nodes": 5 + }, + "question": "thr_supply_leak references org_northbridge_logistics. Which alias_docksparrow user works there and collaborates on event_project_lantern?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + } + ], + "task_id": "seed_task_1", + "task_type": "fixed_trace" + }, + { + "answer": "user_diya", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_03", + "support_nodes": 7 + }, + "question": "alias_monsoonbyte authored post_drone_parts about event_black_kite. 
Which user behind that alias is directly connected to the Kestrel collaborator?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + } + ], + "task_id": "seed_task_2", + "task_type": "fixed_trace" + }, + { + "answer": "user_faris", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_04", + "support_nodes": 6 + }, + "question": "alias_nightrelay references loc_rivergate. 
Which user behind it works at an org operating there and collaborates on event_project_lantern?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "post_sat_phone_ping", + "rel": "authored_post", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_tidewatch_ops" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_faris" + } + ], + "task_id": "seed_task_3", + "task_type": "fixed_trace" + }, + { + "answer": "user_ivy", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_05", + "support_nodes": 6 + }, + "question": "thr_port_audit discusses Black Kite and references Kestrel Works. 
Which alias_orchidfox user authored post_midnight_manifest and collaborates on Black Kite?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + } + ], + "task_id": "seed_task_4", + "task_type": "fixed_trace" + }, + { + "answer": "user_kian", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_06", + "support_nodes": 5 + }, + "question": "Which Atlas Freight user behind alias_lanternmoth authored post_quay_ledgers and collaborates on event_glass_harbor?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + } + ], + "task_id": "seed_task_5", + "task_type": "fixed_trace" + }, + { + "answer": "user_mika", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_07", + "support_nodes": 5 + }, + "question": "Which Orion Customs user behind alias_basinraven authored post_customs_tag and collaborates on event_iron_wharf?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + 
"src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + } + ], + "task_id": "seed_task_6", + "task_type": "fixed_trace" + }, + { + "answer": "user_nora", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_08", + "support_nodes": 5 + }, + "question": "Which user behind alias_emberglass posted basin_photo from Foundry Row and investigates Amber Veil?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + } + ], + "task_id": "seed_task_7", + "task_type": "fixed_trace" + }, + { + "answer": "user_soren", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_09", + "support_nodes": 4 + }, + "question": "Which user behind alias_tideshard authored post_hull_signal and collaborates on Ghost Signal?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + } + ], + "task_id": "seed_task_8", + "task_type": "fixed_trace" + }, + { + "answer": "user_tara", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_10", 
+ "support_nodes": 5 + }, + "question": "Which Harborlight Transit user behind alias_sablekeel authored post_uplink_note and reports on Ghost Signal?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + } + ], + "task_id": "seed_task_9", + "task_type": "fixed_trace" + }, + { + "answer": "org_northbridge_logistics", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_01", + "support_nodes": 17 + }, + "question": "Follow alias_docksparrow through post_shift_roster, Dockyard 17, and the Lantern chain. Return the org node id.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "post_shift_roster", + "rel": "authored_post", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + 
"confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.92, + "dst": "user_faris", + "rel": "connected_to", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + } + ], + "task_id": "seed_task_10", + "task_type": "fixed_trace" + }, + { + "answer": "user_kian", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_02", + "support_nodes": 17 + }, + "question": "Across the Glass Harbor cluster, which user behind alias_lanternmoth links to the Atlas Freight network from thr_quiet_manifest?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "thr_quiet_manifest", + "rel": "authored_thread", + "src": 
"user_kian" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "discusses", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "references", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 0.93, + "dst": "user_omar", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "user_mika", + "rel": "connected_to", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + } + ], + "task_id": "seed_task_11", + "task_type": "fixed_trace" + }, + { + "answer": "user_mika", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_03", + "support_nodes": 17 + }, + "question": "Trace alias_basinraven through post_customs_tag, thr_customs_breach, and the Orion Customs collaboration chain. 
Who is it?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "thr_customs_breach", + "rel": "authored_thread", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "discusses", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "references", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_quinn" + }, + { + "confidence": 0.89, + "dst": "user_quinn", + "rel": "connected_to", + "src": "user_mika" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + 
{ + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "seed_task_12", + "task_type": "fixed_trace" + }, + { + "answer": "user_rhea", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_04", + "support_nodes": 18 + }, + "question": "In the Ember Tide and Amber Veil overlap, which Foundry Row user behind alias_cinderveil collaborates on Ember Tide?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "post_foundry_map", + "rel": "authored_post", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "thr_foundry_watch", + "rel": "authored_thread", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "references", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "thr_ember_tide_watch", + "rel": "authored_thread", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": 
"user_rhea" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "seed_task_13", + "task_type": "fixed_trace" + }, + { + "answer": "org_harborlight_transit", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_05", + "support_nodes": 17 + }, + "question": "Follow alias_tideshard from post_hull_signal into thr_uplink_route and the Harborlight relay. 
Return the org node id.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": 
"post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "seed_task_14", + "task_type": "fixed_trace" + }, + { + "answer": "user_leena", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_06", + "support_nodes": 17 + }, + "question": "Which Sunmesh user behind alias_frostledger connects post_lantern_route to thr_relay_map and the Sector 9 monitoring chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "post_lantern_route", + "rel": "authored_post", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "thr_relay_map", + "rel": "authored_thread", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_sunmesh_analytics" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "monitors", + "src": "user_leena" + }, + { + "confidence": 0.91, + "dst": "user_leena", + 
"rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.83, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + } + ], + "task_id": "seed_task_15", + "task_type": "fixed_trace" + }, + { + "answer": "user_nora", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_07", + "support_nodes": 18 + }, + "question": "Which user behind alias_emberglass is tied to Amber Veil after combining post_basin_photo, thr_basin_shift, and the Foundry Row investigation chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "thr_basin_shift", + "rel": "authored_thread", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "discusses", + "src": "thr_basin_shift" + }, + { + "confidence": 
1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "seed_task_16", + "task_type": "fixed_trace" + }, + { + "answer": "user_ivy", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_08", + "support_nodes": 17 + }, + "question": "Combine alias_orchidfox, post_midnight_manifest, thr_supply_leak, and the Lantern to Glass Harbor bridge. 
Which user starts that chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + } + ], + "task_id": "seed_task_17", + "task_type": "fixed_trace" + }, + { + "answer": "user_diya", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_09", + 
"support_nodes": 18 + }, + "question": "Which user behind alias_monsoonbyte sits at the overlap of Blueharbor Media, Project Lantern, Black Kite, and the Ivy connection chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "thr_port_audit", + "rel": "authored_thread", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_jules" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "investigates", + "src": "user_diya" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "reports_on", + "src": "user_jules" + }, + { + "confidence": 0.9, + "dst": "user_diya", + "rel": "connected_to", + "src": "user_faris" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": 
"alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "seed_task_18", + "task_type": "fixed_trace" + }, + { + "answer": "user_bharat", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_10", + "support_nodes": 17 + }, + "question": "Who is the Northbridge user behind alias_steelquill when combining post_relay_schedule, thr_supply_leak, Dockyard 17, and Lantern collaborator edges?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "post_relay_schedule", + "rel": "authored_post", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 
0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + } + ], + "task_id": "seed_task_19", + "task_type": "fixed_trace" + }, + { + "answer": "user_ivy", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_01", + "support_nodes": 50 + }, + "question": "Lantern to Glass Harbor handoff: identify the user behind alias_orchidfox after combining Lantern logistics, Dockyard links, and Atlas Freight bridge evidence.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + 
"rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_kestrel_works" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "operates_in", + "src": "org_atlas_freight" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.92, + "dst": 
"user_faris", + "rel": "connected_to", + "src": "user_hiro" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.93, + "dst": "user_omar", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "user_mika", + "rel": "connected_to", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_faris" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "thr_quiet_manifest", + "rel": "authored_thread", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "discusses", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "references", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.77, + "dst": "org_northbridge_logistics", + "rel": 
"connected_to", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + } + ], + "task_id": 
"seed_task_20", + "task_type": "fixed_trace" + }, + { + "answer": "user_mika", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_02", + "support_nodes": 50 + }, + "question": "North Basin to Foundry Row escalation: which user behind alias_basinraven anchors the Iron Wharf side before the Emberline handoff?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "thr_customs_breach", + "rel": "authored_thread", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "discusses", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "references", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "thr_basin_shift", + "rel": "authored_thread", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "discusses", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "thr_foundry_watch", + "rel": "authored_thread", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "references", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" 
+ }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "operates_in", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 0.89, + "dst": "user_quinn", + "rel": "connected_to", + "src": "user_mika" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_quinn" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "post_foundry_map", + "rel": "authored_post", + 
"src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 0.77, + "dst": "event_amber_veil", + "rel": "connected_to", + "src": "event_black_kite" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 0.82, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.8, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": 
"alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + } + ], + "task_id": "seed_task_21", + "task_type": "fixed_trace" + }, + { + "answer": "user_soren", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_03", + "support_nodes": 50 + }, + "question": "Harborlight ghost-signal relay: identify the user behind alias_tideshard at the Harborlight / Tidewatch junction.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": 
"thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "thr_ghost_signal_net", + "rel": "authored_thread", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_tidewatch_ops" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.84, + "dst": "user_kian", + "rel": "connected_to", + "src": "user_tara" + }, + { + "confidence": 0.79, + "dst": "user_faris", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_faris" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": 
"post_sat_phone_ping", + "rel": "authored_post", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": 
"alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + } + ], + "task_id": "seed_task_22", + "task_type": "fixed_trace" + }, + { + "answer": "user_diya", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_04", + "support_nodes": 50 + }, + "question": "Blueharbor to Black Kite to Lantern overlap: which user is the Blueharbor origin behind alias_monsoonbyte?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "thr_port_audit", + "rel": "authored_thread", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + 
"dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_blueharbor_media" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_kestrel_works" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_apex_dynamics" + }, + { + "confidence": 0.9, + "dst": "user_diya", + "rel": "connected_to", + "src": "user_faris" + }, + { + "confidence": 0.83, + "dst": "user_gita", + "rel": "connected_to", + "src": "user_cyrus" + }, + { + "confidence": 0.82, + "dst": "user_jules", + "rel": "connected_to", + "src": "user_gita" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "investigates", + "src": "user_diya" + }, + { + "confidence": 0.9, + "dst": 
"event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_cyrus" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "investigates", + "src": "user_elin" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "reports_on", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.77, + "dst": "event_amber_veil", + "rel": "connected_to", + "src": "event_black_kite" + }, + { + "confidence": 1.0, + "dst": "thr_relay_map", + "rel": "authored_thread", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "monitors", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" 
+ }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + } + ], + "task_id": "seed_task_23", + "task_type": "fixed_trace" + }, + { + "answer": "user_bharat", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_05", + "support_nodes": 50 + }, + "question": "Sector 9 to Dockyard 17 full relay: which user behind alias_steelquill links the Northbridge chain and the Sunmesh monitoring bridge?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "post_relay_schedule", + "rel": "authored_post", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "post_lantern_route", + "rel": "authored_post", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_lantern_route" + }, + { + 
"confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "thr_relay_map", + "rel": "authored_thread", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": 
"operates_in", + "src": "org_sunmesh_analytics" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_helios_labs" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.89, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_diya" + }, + { + "confidence": 0.87, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_elin" + }, + { + "confidence": 0.84, + "dst": "user_cyrus", + "rel": "connected_to", + "src": "user_aria" + }, + { + "confidence": 0.91, + "dst": "user_leena", + "rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.83, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_leena" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "monitors", + "src": "user_leena" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.77, + "dst": "org_northbridge_logistics", + "rel": "connected_to", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + 
"rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_faris" + } + ], + "task_id": "seed_task_24", + "task_type": "fixed_trace" + }, + { + "answer": "user_nora", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_06", + "support_nodes": 50 + }, + "question": "Foundry Row, North Basin, and Uplink Yard spread: identify the user behind alias_emberglass 
before the Harborlight relay takes over.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "post_foundry_map", + "rel": "authored_post", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "thr_foundry_watch", + "rel": "authored_thread", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "references", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "thr_ember_tide_watch", + "rel": "authored_thread", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": 
"references", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": "user_rhea" + }, + { + "confidence": 0.9, + 
"dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": 
"org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + } + ], + "task_id": "seed_task_25", + "task_type": "fixed_trace" + }, + { + "answer": "user_kian", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_07", + "support_nodes": 50 + }, + "question": "Freight and customs bridge: which Atlas Freight user behind alias_lanternmoth connects Glass Harbor with the Northbridge chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "thr_quiet_manifest", + "rel": "authored_thread", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "discusses", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "references", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": 
"loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "operates_in", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + }, + { + "confidence": 0.93, + "dst": "user_omar", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "user_mika", + "rel": "connected_to", + "src": "user_omar" + }, + { + "confidence": 0.8, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "post_shift_roster", + "rel": "authored_post", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": 
"references", + "src": "thr_supply_leak" + }, + { + "confidence": 0.77, + "dst": "org_northbridge_logistics", + "rel": "connected_to", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { 
+ "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + } + ], + "task_id": "seed_task_26", + "task_type": "fixed_trace" + }, + { + "answer": "user_cyrus", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_08", + "support_nodes": 50 + }, + "question": "Black Kite, Amber Veil, and Iron Wharf overlap: which user behind alias_quartzlotus is the Apex-side collaborator?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_apex_dynamics" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "thr_port_audit", + "rel": "authored_thread", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": 
"investigates", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "thr_basin_shift", + "rel": "authored_thread", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "discusses", + "src": "thr_basin_shift" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "thr_customs_breach", + "rel": "authored_thread", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "discusses", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_kestrel_works" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "operates_in", + "src": "org_orion_customs" + }, + { + "confidence": 0.83, + "dst": "user_gita", + "rel": "connected_to", + "src": "user_cyrus" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.89, + "dst": "user_quinn", + "rel": 
"connected_to", + "src": "user_mika" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.77, + "dst": "event_amber_veil", + "rel": "connected_to", + "src": "event_black_kite" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + } + 
], + "task_id": "seed_task_27", + "task_type": "fixed_trace" + }, + { + "answer": "user_tara", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_09", + "support_nodes": 50 + }, + "question": "Ghost Signal and Ember Tide relay: which user behind alias_sablekeel is the Harborlight reporting endpoint?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "thr_ghost_signal_net", + "rel": "authored_thread", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "thr_ember_tide_watch", + "rel": "authored_thread", 
+ "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": "user_rhea" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 0.77, + "dst": 
"event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": 
"located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + } + ], + "task_id": "seed_task_28", + "task_type": "fixed_trace" + }, + { + "answer": "user_priya", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_10", + "support_nodes": 55 + }, + "question": "End-to-end benchmark sweep: across Lantern, Black Kite, Glass Harbor, Iron Wharf, Ember Tide, and Ghost Signal, which user behind alias_hollowsignal anchors the Sunmesh monitoring side?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": 
"alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_gita" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_gita" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": 
"loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + 
"src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_helios_labs" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + } + ], + "task_id": "seed_task_29", + "task_type": "fixed_trace" + } + ] +} \ No newline at end of file diff --git a/datasets/fixed_levels/fixed_graph_questions.json b/datasets/fixed_levels/fixed_graph_questions.json new file mode 100644 index 0000000000000000000000000000000000000000..2fdf99e415462e6256c65214e432b9ba064c3840 --- /dev/null +++ b/datasets/fixed_levels/fixed_graph_questions.json @@ -0,0 +1,7009 @@ +{ + "dataset_name": "fixed_levels_submission_set", + "difficulty_counts": { + "easy": 10, + "high": 10, + "mid": 10 + }, + "graph": { + "edge_count": 185, + "edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": 
"user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_faris" + 
}, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_gita" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_gita" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", 
+ "rel": "located_in", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_helios_labs" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_apex_dynamics" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_blueharbor_media" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_tidewatch_ops" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_kestrel_works" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "operates_in", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_sunmesh_analytics" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "operates_in", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + 
"confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.92, + "dst": "user_faris", + "rel": "connected_to", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "user_diya", + "rel": "connected_to", + "src": "user_faris" + }, + { + "confidence": 0.89, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_diya" + }, + { + "confidence": 0.87, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_elin" + }, + { + "confidence": 0.84, + "dst": "user_cyrus", + "rel": "connected_to", + "src": "user_aria" + }, + { + "confidence": 0.83, + "dst": "user_gita", + "rel": "connected_to", + "src": "user_cyrus" + }, + { + "confidence": 0.82, + "dst": "user_jules", + "rel": "connected_to", + "src": "user_gita" + }, + { + "confidence": 0.81, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_jules" + }, + { + "confidence": 0.9, + "dst": "user_ivy", + "rel": "connected_to", + "src": "user_diya" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.93, + "dst": "user_omar", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "user_mika", + "rel": "connected_to", + "src": "user_omar" + }, + { + "confidence": 0.89, + "dst": "user_quinn", + "rel": "connected_to", + "src": "user_mika" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + 
}, + { + "confidence": 0.84, + "dst": "user_kian", + "rel": "connected_to", + "src": "user_tara" + }, + { + "confidence": 0.91, + "dst": "user_leena", + "rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.83, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_leena" + }, + { + "confidence": 0.82, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.8, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.79, + "dst": "user_faris", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.78, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_shift_roster", + "rel": "authored_post", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "post_sat_phone_ping", + "rel": "authored_post", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "post_relay_schedule", + "rel": "authored_post", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "post_foundry_map", + "rel": "authored_post", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "post_lantern_route", + "rel": "authored_post", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": 
"post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + 
"confidence": 1.0, + "dst": "event_amber_veil", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "thr_port_audit", + "rel": "authored_thread", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "thr_customs_breach", + "rel": "authored_thread", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "thr_relay_map", + "rel": "authored_thread", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "thr_foundry_watch", + "rel": "authored_thread", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "thr_basin_shift", + "rel": "authored_thread", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "thr_quiet_manifest", + "rel": "authored_thread", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "thr_ember_tide_watch", + "rel": "authored_thread", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "thr_ghost_signal_net", + "rel": "authored_thread", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + 
"confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "discusses", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "references", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "references", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "discusses", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "discusses", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "references", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": 
"thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_faris" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "investigates", + "src": "user_diya" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "monitors", + "src": "user_leena" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_cyrus" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "investigates", + "src": "user_elin" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "reports_on", + "src": "user_jules" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_quinn" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": "user_rhea" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + 
"confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 0.9, + "dst": "event_silent_current", + "rel": "monitors", + "src": "user_gita" + }, + { + "confidence": 0.9, + "dst": "event_silent_current", + "rel": "reports_on", + "src": "user_jules" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.77, + "dst": "event_amber_veil", + "rel": "connected_to", + "src": "event_black_kite" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_northbridge_logistics", + "rel": "connected_to", + "src": "org_atlas_freight" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + } + ], + "node_count": 85, + "nodes": [ + { + "attrs": { + "location": "Sector 9", + "name": "Aria Sen", + "org": "Helios Labs" + }, + "node_id": "user_aria", + "node_type": "user" + }, + { + "attrs": { + "location": "Dockyard 17", + "name": "Bharat Kulkarni", + "org": "Northbridge Logistics" + }, + "node_id": "user_bharat", + "node_type": "user" + }, + { + "attrs": { + "location": "Old Town", + "name": "Cyrus Mehta", + "org": "Apex Dynamics" + }, + "node_id": "user_cyrus", + "node_type": "user" + }, + { + "attrs": { + "location": "Old Town", + "name": "Diya Roy", + "org": "Blueharbor Media" + }, + "node_id": "user_diya", + "node_type": "user" + }, + { + "attrs": { + "location": "Sector 9", + "name": "Elin Das", + "org": "Helios Labs" + }, + "node_id": "user_elin", + "node_type": "user" + }, + { + "attrs": { + "location": "Rivergate", + "name": "Faris Noor", + "org": "Tidewatch Ops" + }, + "node_id": "user_faris", + "node_type": "user" + }, + { + "attrs": { + 
"location": "Old Town", + "name": "Gita Pradhan", + "org": "Apex Dynamics" + }, + "node_id": "user_gita", + "node_type": "user" + }, + { + "attrs": { + "location": "Dockyard 17", + "name": "Hiro Tan", + "org": "Northbridge Logistics" + }, + "node_id": "user_hiro", + "node_type": "user" + }, + { + "attrs": { + "location": "Rivergate", + "name": "Ivy Kapoor", + "org": "Kestrel Works" + }, + "node_id": "user_ivy", + "node_type": "user" + }, + { + "attrs": { + "location": "Old Town", + "name": "Jules Banerjee", + "org": "Blueharbor Media" + }, + "node_id": "user_jules", + "node_type": "user" + }, + { + "attrs": { + "location": "East Quay", + "name": "Kian Bose", + "org": "Atlas Freight" + }, + "node_id": "user_kian", + "node_type": "user" + }, + { + "attrs": { + "location": "Sector 9", + "name": "Leena Das", + "org": "Sunmesh Analytics" + }, + "node_id": "user_leena", + "node_type": "user" + }, + { + "attrs": { + "location": "North Basin", + "name": "Mika Solanki", + "org": "Orion Customs" + }, + "node_id": "user_mika", + "node_type": "user" + }, + { + "attrs": { + "location": "Foundry Row", + "name": "Nora Iqbal", + "org": "Emberline Security" + }, + "node_id": "user_nora", + "node_type": "user" + }, + { + "attrs": { + "location": "East Quay", + "name": "Omar Sheikh", + "org": "Atlas Freight" + }, + "node_id": "user_omar", + "node_type": "user" + }, + { + "attrs": { + "location": "Sector 9", + "name": "Priya Menon", + "org": "Sunmesh Analytics" + }, + "node_id": "user_priya", + "node_type": "user" + }, + { + "attrs": { + "location": "North Basin", + "name": "Quinn Rao", + "org": "Orion Customs" + }, + "node_id": "user_quinn", + "node_type": "user" + }, + { + "attrs": { + "location": "Foundry Row", + "name": "Rhea Kapoor", + "org": "Emberline Security" + }, + "node_id": "user_rhea", + "node_type": "user" + }, + { + "attrs": { + "location": "Uplink Yard", + "name": "Soren Malik", + "org": "Harborlight Transit" + }, + "node_id": "user_soren", + "node_type": "user" + }, + 
{ + "attrs": { + "location": "Uplink Yard", + "name": "Tara Dey", + "org": "Harborlight Transit" + }, + "node_id": "user_tara", + "node_type": "user" + }, + { + "attrs": { + "handle": "@orchidfox" + }, + "node_id": "alias_orchidfox", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@steelquill" + }, + "node_id": "alias_steelquill", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@monsoonbyte" + }, + "node_id": "alias_monsoonbyte", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@nightrelay" + }, + "node_id": "alias_nightrelay", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@mapleghost" + }, + "node_id": "alias_mapleghost", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@docksparrow" + }, + "node_id": "alias_docksparrow", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@quartzlotus" + }, + "node_id": "alias_quartzlotus", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@emberglass" + }, + "node_id": "alias_emberglass", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@basinraven" + }, + "node_id": "alias_basinraven", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@tideshard" + }, + "node_id": "alias_tideshard", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@hollowsignal" + }, + "node_id": "alias_hollowsignal", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@ironwhisper" + }, + "node_id": "alias_ironwhisper", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@cinderveil" + }, + "node_id": "alias_cinderveil", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@sablekeel" + }, + "node_id": "alias_sablekeel", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@lanternmoth" + }, + "node_id": "alias_lanternmoth", + "node_type": "alias" + }, + { + "attrs": { + "handle": "@frostledger" + }, + "node_id": "alias_frostledger", + "node_type": "alias" + }, + { + "attrs": { + "name": "Helios Labs" + }, + "node_id": "org_helios_labs", + 
"node_type": "org" + }, + { + "attrs": { + "name": "Northbridge Logistics" + }, + "node_id": "org_northbridge_logistics", + "node_type": "org" + }, + { + "attrs": { + "name": "Apex Dynamics" + }, + "node_id": "org_apex_dynamics", + "node_type": "org" + }, + { + "attrs": { + "name": "Blueharbor Media" + }, + "node_id": "org_blueharbor_media", + "node_type": "org" + }, + { + "attrs": { + "name": "Tidewatch Ops" + }, + "node_id": "org_tidewatch_ops", + "node_type": "org" + }, + { + "attrs": { + "name": "Kestrel Works" + }, + "node_id": "org_kestrel_works", + "node_type": "org" + }, + { + "attrs": { + "name": "Atlas Freight" + }, + "node_id": "org_atlas_freight", + "node_type": "org" + }, + { + "attrs": { + "name": "Sunmesh Analytics" + }, + "node_id": "org_sunmesh_analytics", + "node_type": "org" + }, + { + "attrs": { + "name": "Orion Customs" + }, + "node_id": "org_orion_customs", + "node_type": "org" + }, + { + "attrs": { + "name": "Emberline Security" + }, + "node_id": "org_emberline_security", + "node_type": "org" + }, + { + "attrs": { + "name": "Harborlight Transit" + }, + "node_id": "org_harborlight_transit", + "node_type": "org" + }, + { + "attrs": { + "name": "Dockyard 17" + }, + "node_id": "loc_dockyard17", + "node_type": "location" + }, + { + "attrs": { + "name": "Sector 9" + }, + "node_id": "loc_sector9", + "node_type": "location" + }, + { + "attrs": { + "name": "Old Town" + }, + "node_id": "loc_old_town", + "node_type": "location" + }, + { + "attrs": { + "name": "Rivergate" + }, + "node_id": "loc_rivergate", + "node_type": "location" + }, + { + "attrs": { + "name": "East Quay" + }, + "node_id": "loc_east_quay", + "node_type": "location" + }, + { + "attrs": { + "name": "Foundry Row" + }, + "node_id": "loc_foundry_row", + "node_type": "location" + }, + { + "attrs": { + "name": "North Basin" + }, + "node_id": "loc_north_basin", + "node_type": "location" + }, + { + "attrs": { + "name": "Uplink Yard" + }, + "node_id": "loc_uplink_yard", + "node_type": 
"location" + }, + { + "attrs": { + "name": "Project Lantern" + }, + "node_id": "event_project_lantern", + "node_type": "event" + }, + { + "attrs": { + "name": "Black Kite" + }, + "node_id": "event_black_kite", + "node_type": "event" + }, + { + "attrs": { + "name": "Silent Current" + }, + "node_id": "event_silent_current", + "node_type": "event" + }, + { + "attrs": { + "name": "Amber Veil" + }, + "node_id": "event_amber_veil", + "node_type": "event" + }, + { + "attrs": { + "name": "Glass Harbor" + }, + "node_id": "event_glass_harbor", + "node_type": "event" + }, + { + "attrs": { + "name": "Ember Tide" + }, + "node_id": "event_ember_tide", + "node_type": "event" + }, + { + "attrs": { + "name": "Iron Wharf" + }, + "node_id": "event_iron_wharf", + "node_type": "event" + }, + { + "attrs": { + "name": "Ghost Signal" + }, + "node_id": "event_ghost_signal", + "node_type": "event" + }, + { + "attrs": { + "topic": "supply_chain" + }, + "node_id": "thr_supply_leak", + "node_type": "thread" + }, + { + "attrs": { + "topic": "port_audit" + }, + "node_id": "thr_port_audit", + "node_type": "thread" + }, + { + "attrs": { + "topic": "customs_breach" + }, + "node_id": "thr_customs_breach", + "node_type": "thread" + }, + { + "attrs": { + "topic": "relay_map" + }, + "node_id": "thr_relay_map", + "node_type": "thread" + }, + { + "attrs": { + "topic": "foundry_watch" + }, + "node_id": "thr_foundry_watch", + "node_type": "thread" + }, + { + "attrs": { + "topic": "basin_shift" + }, + "node_id": "thr_basin_shift", + "node_type": "thread" + }, + { + "attrs": { + "topic": "quiet_manifest" + }, + "node_id": "thr_quiet_manifest", + "node_type": "thread" + }, + { + "attrs": { + "topic": "uplink_route" + }, + "node_id": "thr_uplink_route", + "node_type": "thread" + }, + { + "attrs": { + "topic": "ember_tide" + }, + "node_id": "thr_ember_tide_watch", + "node_type": "thread" + }, + { + "attrs": { + "topic": "ghost_signal" + }, + "node_id": "thr_ghost_signal_net", + "node_type": "thread" + }, + { + 
"attrs": { + "channel": "microblog" + }, + "node_id": "post_shift_roster", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_midnight_manifest", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_sat_phone_ping", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_drone_parts", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_relay_schedule", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_quay_ledgers", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_customs_tag", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_hull_signal", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_basin_photo", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_foundry_map", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_lantern_route", + "node_type": "post" + }, + { + "attrs": { + "channel": "microblog" + }, + "node_id": "post_uplink_note", + "node_type": "post" + } + ] + }, + "question_count": 30, + "questions": [ + { + "answer": "user_bharat", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_01", + "support_nodes": 6 + }, + "question": "alias_orchidfox -> post_midnight_manifest -> loc_dockyard17 -> connected collaborator on event_project_lantern. 
Who is it?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + } + ], + "task_id": "fixed_task_00", + "task_type": "fixed_trace" + }, + { + "answer": "user_hiro", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_02", + "support_nodes": 5 + }, + "question": "thr_supply_leak references org_northbridge_logistics. Which alias_docksparrow user works there and collaborates on event_project_lantern?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + } + ], + "task_id": "fixed_task_01", + "task_type": "fixed_trace" + }, + { + "answer": "user_diya", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_03", + "support_nodes": 7 + }, + "question": "alias_monsoonbyte authored post_drone_parts about event_black_kite. 
Which user behind that alias is directly connected to the Kestrel collaborator?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + } + ], + "task_id": "fixed_task_02", + "task_type": "fixed_trace" + }, + { + "answer": "user_faris", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_04", + "support_nodes": 6 + }, + "question": "alias_nightrelay references loc_rivergate. 
Which user behind it works at an org operating there and collaborates on event_project_lantern?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "post_sat_phone_ping", + "rel": "authored_post", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_tidewatch_ops" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_faris" + } + ], + "task_id": "fixed_task_03", + "task_type": "fixed_trace" + }, + { + "answer": "user_ivy", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_05", + "support_nodes": 6 + }, + "question": "thr_port_audit discusses Black Kite and references Kestrel Works. 
Which alias_orchidfox user authored post_midnight_manifest and collaborates on Black Kite?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + } + ], + "task_id": "fixed_task_04", + "task_type": "fixed_trace" + }, + { + "answer": "user_kian", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_06", + "support_nodes": 5 + }, + "question": "Which Atlas Freight user behind alias_lanternmoth authored post_quay_ledgers and collaborates on event_glass_harbor?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + } + ], + "task_id": "fixed_task_05", + "task_type": "fixed_trace" + }, + { + "answer": "user_mika", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_07", + "support_nodes": 5 + }, + "question": "Which Orion Customs user behind alias_basinraven authored post_customs_tag and collaborates on event_iron_wharf?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", 
+ "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + } + ], + "task_id": "fixed_task_06", + "task_type": "fixed_trace" + }, + { + "answer": "user_nora", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_08", + "support_nodes": 5 + }, + "question": "Which user behind alias_emberglass posted basin_photo from Foundry Row and investigates Amber Veil?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + } + ], + "task_id": "fixed_task_07", + "task_type": "fixed_trace" + }, + { + "answer": "user_soren", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_09", + "support_nodes": 4 + }, + "question": "Which user behind alias_tideshard authored post_hull_signal and collaborates on Ghost Signal?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + } + ], + "task_id": "fixed_task_08", + "task_type": "fixed_trace" + }, + { + "answer": "user_tara", + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": 
"easy_10", + "support_nodes": 5 + }, + "question": "Which Harborlight Transit user behind alias_sablekeel authored post_uplink_note and reports on Ghost Signal?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + } + ], + "task_id": "fixed_task_09", + "task_type": "fixed_trace" + }, + { + "answer": "org_northbridge_logistics", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_01", + "support_nodes": 17 + }, + "question": "Follow alias_docksparrow through post_shift_roster, Dockyard 17, and the Lantern chain. Return the org node id.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "post_shift_roster", + "rel": "authored_post", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, 
+ { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.92, + "dst": "user_faris", + "rel": "connected_to", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + } + ], + "task_id": "fixed_task_10", + "task_type": "fixed_trace" + }, + { + "answer": "user_kian", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_02", + "support_nodes": 17 + }, + "question": "Across the Glass Harbor cluster, which user behind alias_lanternmoth links to the Atlas Freight network from thr_quiet_manifest?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "thr_quiet_manifest", + "rel": "authored_thread", + 
"src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "discusses", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "references", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 0.93, + "dst": "user_omar", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "user_mika", + "rel": "connected_to", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + } + ], + "task_id": "fixed_task_11", + "task_type": "fixed_trace" + }, + { + "answer": "user_mika", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_03", + "support_nodes": 17 + }, + "question": "Trace alias_basinraven through post_customs_tag, thr_customs_breach, and the Orion Customs collaboration chain. 
Who is it?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "thr_customs_breach", + "rel": "authored_thread", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "discusses", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "references", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_quinn" + }, + { + "confidence": 0.89, + "dst": "user_quinn", + "rel": "connected_to", + "src": "user_mika" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + 
{ + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "fixed_task_12", + "task_type": "fixed_trace" + }, + { + "answer": "user_rhea", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_04", + "support_nodes": 18 + }, + "question": "In the Ember Tide and Amber Veil overlap, which Foundry Row user behind alias_cinderveil collaborates on Ember Tide?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "post_foundry_map", + "rel": "authored_post", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "thr_foundry_watch", + "rel": "authored_thread", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "references", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "thr_ember_tide_watch", + "rel": "authored_thread", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": 
"user_rhea" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "fixed_task_13", + "task_type": "fixed_trace" + }, + { + "answer": "org_harborlight_transit", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_05", + "support_nodes": 17 + }, + "question": "Follow alias_tideshard from post_hull_signal into thr_uplink_route and the Harborlight relay. 
Return the org node id.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": 
"post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "fixed_task_14", + "task_type": "fixed_trace" + }, + { + "answer": "user_leena", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_06", + "support_nodes": 17 + }, + "question": "Which Sunmesh user behind alias_frostledger connects post_lantern_route to thr_relay_map and the Sector 9 monitoring chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "post_lantern_route", + "rel": "authored_post", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "thr_relay_map", + "rel": "authored_thread", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_sunmesh_analytics" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "monitors", + "src": "user_leena" + }, + { + "confidence": 0.91, + "dst": "user_leena", + 
"rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.83, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + } + ], + "task_id": "fixed_task_15", + "task_type": "fixed_trace" + }, + { + "answer": "user_nora", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_07", + "support_nodes": 18 + }, + "question": "Which user behind alias_emberglass is tied to Amber Veil after combining post_basin_photo, thr_basin_shift, and the Foundry Row investigation chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "thr_basin_shift", + "rel": "authored_thread", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "discusses", + "src": "thr_basin_shift" + }, + { + 
"confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "fixed_task_16", + "task_type": "fixed_trace" + }, + { + "answer": "user_ivy", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_08", + "support_nodes": 17 + }, + "question": "Combine alias_orchidfox, post_midnight_manifest, thr_supply_leak, and the Lantern to Glass Harbor bridge. 
Which user starts that chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + } + ], + "task_id": "fixed_task_17", + "task_type": "fixed_trace" + }, + { + "answer": "user_diya", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_09", + 
"support_nodes": 18 + }, + "question": "Which user behind alias_monsoonbyte sits at the overlap of Blueharbor Media, Project Lantern, Black Kite, and the Ivy connection chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "thr_port_audit", + "rel": "authored_thread", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_jules" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "investigates", + "src": "user_diya" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "reports_on", + "src": "user_jules" + }, + { + "confidence": 0.9, + "dst": "user_diya", + "rel": "connected_to", + "src": "user_faris" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": 
"alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + } + ], + "task_id": "fixed_task_18", + "task_type": "fixed_trace" + }, + { + "answer": "user_bharat", + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_10", + "support_nodes": 17 + }, + "question": "Who is the Northbridge user behind alias_steelquill when combining post_relay_schedule, thr_supply_leak, Dockyard 17, and Lantern collaborator edges?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "post_relay_schedule", + "rel": "authored_post", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 
0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + } + ], + "task_id": "fixed_task_19", + "task_type": "fixed_trace" + }, + { + "answer": "user_ivy", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_01", + "support_nodes": 50 + }, + "question": "Lantern to Glass Harbor handoff: identify the user behind alias_orchidfox after combining Lantern logistics, Dockyard links, and Atlas Freight bridge evidence.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + 
"rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_kestrel_works" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "operates_in", + "src": "org_atlas_freight" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.92, + "dst": 
"user_faris", + "rel": "connected_to", + "src": "user_hiro" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.93, + "dst": "user_omar", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "user_mika", + "rel": "connected_to", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_faris" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "thr_quiet_manifest", + "rel": "authored_thread", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "discusses", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "references", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.77, + "dst": "org_northbridge_logistics", + "rel": 
"connected_to", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + } + ], + "task_id": 
"fixed_task_20", + "task_type": "fixed_trace" + }, + { + "answer": "user_mika", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_02", + "support_nodes": 50 + }, + "question": "North Basin to Foundry Row escalation: which user behind alias_basinraven anchors the Iron Wharf side before the Emberline handoff?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "post_customs_tag", + "rel": "authored_post", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "references", + "src": "post_customs_tag" + }, + { + "confidence": 1.0, + "dst": "thr_customs_breach", + "rel": "authored_thread", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "discusses", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "references", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "thr_basin_shift", + "rel": "authored_thread", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "discusses", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "references", + "src": "thr_basin_shift" + }, + { + "confidence": 1.0, + "dst": "thr_foundry_watch", + "rel": "authored_thread", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "references", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" 
+ }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "operates_in", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 0.89, + "dst": "user_quinn", + "rel": "connected_to", + "src": "user_mika" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_quinn" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "post_foundry_map", + "rel": "authored_post", + 
"src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 0.77, + "dst": "event_amber_veil", + "rel": "connected_to", + "src": "event_black_kite" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 0.82, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.8, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": 
"alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + } + ], + "task_id": "fixed_task_21", + "task_type": "fixed_trace" + }, + { + "answer": "user_soren", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_03", + "support_nodes": 50 + }, + "question": "Harborlight ghost-signal relay: identify the user behind alias_tideshard at the Harborlight / Tidewatch junction.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": 
"thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "thr_ghost_signal_net", + "rel": "authored_thread", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_tidewatch_ops" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.84, + "dst": "user_kian", + "rel": "connected_to", + "src": "user_tara" + }, + { + "confidence": 0.79, + "dst": "user_faris", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_faris" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": 
"post_sat_phone_ping", + "rel": "authored_post", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_sat_phone_ping" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": 
"alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + } + ], + "task_id": "fixed_task_22", + "task_type": "fixed_trace" + }, + { + "answer": "user_diya", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_04", + "support_nodes": 50 + }, + "question": "Blueharbor to Black Kite to Lantern overlap: which user is the Blueharbor origin behind alias_monsoonbyte?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 1.0, + "dst": "thr_port_audit", + "rel": "authored_thread", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "references", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + 
"dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_blueharbor_media" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_kestrel_works" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_apex_dynamics" + }, + { + "confidence": 0.9, + "dst": "user_diya", + "rel": "connected_to", + "src": "user_faris" + }, + { + "confidence": 0.83, + "dst": "user_gita", + "rel": "connected_to", + "src": "user_cyrus" + }, + { + "confidence": 0.82, + "dst": "user_jules", + "rel": "connected_to", + "src": "user_gita" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "investigates", + "src": "user_diya" + }, + { + "confidence": 0.9, + "dst": 
"event_black_kite", + "rel": "collaborates_on", + "src": "user_ivy" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_cyrus" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "investigates", + "src": "user_elin" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "reports_on", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.77, + "dst": "event_amber_veil", + "rel": "connected_to", + "src": "event_black_kite" + }, + { + "confidence": 1.0, + "dst": "thr_relay_map", + "rel": "authored_thread", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "monitors", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" 
+ }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + } + ], + "task_id": "fixed_task_23", + "task_type": "fixed_trace" + }, + { + "answer": "user_bharat", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_05", + "support_nodes": 50 + }, + "question": "Sector 9 to Dockyard 17 full relay: which user behind alias_steelquill links the Northbridge chain and the Sunmesh monitoring bridge?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "post_relay_schedule", + "rel": "authored_post", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "references", + "src": "post_relay_schedule" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "post_lantern_route", + "rel": "authored_post", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "references", + "src": "post_lantern_route" + }, + { + 
"confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "post_lantern_route" + }, + { + "confidence": 1.0, + "dst": "thr_relay_map", + "rel": "authored_thread", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "references", + "src": "thr_relay_map" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "event_project_lantern", + "rel": "discusses", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": 
"operates_in", + "src": "org_sunmesh_analytics" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_helios_labs" + }, + { + "confidence": 0.95, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.89, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_diya" + }, + { + "confidence": 0.87, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_elin" + }, + { + "confidence": 0.84, + "dst": "user_cyrus", + "rel": "connected_to", + "src": "user_aria" + }, + { + "confidence": 0.91, + "dst": "user_leena", + "rel": "connected_to", + "src": "user_priya" + }, + { + "confidence": 0.83, + "dst": "user_aria", + "rel": "connected_to", + "src": "user_leena" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "monitors", + "src": "user_leena" + }, + { + "confidence": 0.77, + "dst": "event_glass_harbor", + "rel": "connected_to", + "src": "event_project_lantern" + }, + { + "confidence": 0.77, + "dst": "org_northbridge_logistics", + "rel": "connected_to", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + 
"rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_faris" + } + ], + "task_id": "fixed_task_24", + "task_type": "fixed_trace" + }, + { + "answer": "user_nora", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_06", + "support_nodes": 50 + }, + "question": "Foundry Row, North Basin, and Uplink Yard spread: identify the user behind alias_emberglass 
before the Harborlight relay takes over.", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "post_basin_photo", + "rel": "authored_post", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "references", + "src": "post_basin_photo" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "post_foundry_map", + "rel": "authored_post", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "references", + "src": "post_foundry_map" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "thr_foundry_watch", + "rel": "authored_thread", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "references", + "src": "thr_foundry_watch" + }, + { + "confidence": 1.0, + "dst": "thr_ember_tide_watch", + "rel": "authored_thread", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": 
"references", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + "rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": "user_rhea" + }, + { + "confidence": 0.9, + 
"dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 0.77, + "dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": 
"org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + } + ], + "task_id": "fixed_task_25", + "task_type": "fixed_trace" + }, + { + "answer": "user_kian", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_07", + "support_nodes": 50 + }, + "question": "Freight and customs bridge: which Atlas Freight user behind alias_lanternmoth connects Glass Harbor with the Northbridge chain?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "post_quay_ledgers", + "rel": "authored_post", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "references", + "src": "post_quay_ledgers" + }, + { + "confidence": 1.0, + "dst": "thr_quiet_manifest", + "rel": "authored_thread", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "event_glass_harbor", + "rel": "discusses", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "references", + "src": "thr_quiet_manifest" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + 
"dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "operates_in", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + }, + { + "confidence": 0.93, + "dst": "user_omar", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "user_mika", + "rel": "connected_to", + "src": "user_omar" + }, + { + "confidence": 0.8, + "dst": "user_bharat", + "rel": "connected_to", + "src": "user_kian" + }, + { + "confidence": 0.95, + "dst": "user_hiro", + "rel": "connected_to", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_kian" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "collaborates_on", + "src": "user_omar" + }, + { + "confidence": 0.9, + "dst": "event_glass_harbor", + "rel": "monitors", + "src": "user_priya" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_bharat" + }, + { + "confidence": 0.9, + "dst": "event_project_lantern", + "rel": "collaborates_on", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "post_shift_roster", + "rel": "authored_post", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "references", + "src": "post_shift_roster" + }, + { + "confidence": 1.0, + "dst": "post_midnight_manifest", + "rel": "authored_post", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "references", + "src": "post_midnight_manifest" + }, + { + "confidence": 1.0, + "dst": "thr_supply_leak", + "rel": "authored_thread", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + 
"rel": "references", + "src": "thr_supply_leak" + }, + { + "confidence": 0.77, + "dst": "org_northbridge_logistics", + "rel": "connected_to", + "src": "org_atlas_freight" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + 
}, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + } + ], + "task_id": "fixed_task_26", + "task_type": "fixed_trace" + }, + { + "answer": "user_cyrus", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_08", + "support_nodes": 50 + }, + "question": "Black Kite, Amber Veil, and Iron Wharf overlap: which user behind alias_quartzlotus is the Apex-side collaborator?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "operates_in", + "src": "org_apex_dynamics" + }, + { + "confidence": 0.9, + "dst": "event_black_kite", + "rel": "collaborates_on", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "thr_port_audit", + "rel": "authored_thread", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "discusses", + "src": "thr_port_audit" + }, + { + "confidence": 1.0, + "dst": "post_drone_parts", + "rel": "authored_post", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "event_black_kite", + "rel": "references", + "src": "post_drone_parts" + }, + { + "confidence": 0.9, + "dst": "event_amber_veil", + 
"rel": "investigates", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "thr_basin_shift", + "rel": "authored_thread", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "event_amber_veil", + "rel": "discusses", + "src": "thr_basin_shift" + }, + { + "confidence": 0.9, + "dst": "event_iron_wharf", + "rel": "collaborates_on", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "thr_customs_breach", + "rel": "authored_thread", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "event_iron_wharf", + "rel": "discusses", + "src": "thr_customs_breach" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "operates_in", + "src": "org_kestrel_works" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "operates_in", + "src": "org_orion_customs" + }, + { + "confidence": 0.83, + "dst": "user_gita", + "rel": "connected_to", + "src": "user_cyrus" + }, + { + "confidence": 0.86, + "dst": "user_elin", + "rel": "connected_to", + "src": "user_ivy" + }, + { + "confidence": 0.89, + "dst": "user_quinn", + 
"rel": "connected_to", + "src": "user_mika" + }, + { + "confidence": 0.88, + "dst": "user_nora", + "rel": "connected_to", + "src": "user_quinn" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.77, + "dst": "event_amber_veil", + "rel": "connected_to", + "src": "event_black_kite" + }, + { + "confidence": 0.77, + "dst": "org_emberline_security", + "rel": "connected_to", + "src": "org_orion_customs" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" 
+ } + ], + "task_id": "fixed_task_27", + "task_type": "fixed_trace" + }, + { + "answer": "user_tara", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_09", + "support_nodes": 50 + }, + "question": "Ghost Signal and Ember Tide relay: which user behind alias_sablekeel is the Harborlight reporting endpoint?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "post_uplink_note", + "rel": "authored_post", + "src": "alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "post_uplink_note" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "post_hull_signal", + "rel": "authored_post", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "references", + "src": "post_hull_signal" + }, + { + "confidence": 1.0, + "dst": "thr_ghost_signal_net", + "rel": "authored_thread", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "references", + "src": "thr_ghost_signal_net" + }, + { + "confidence": 1.0, + "dst": "thr_uplink_route", + "rel": "authored_thread", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "event_ghost_signal", + "rel": "discusses", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "references", + "src": "thr_uplink_route" + }, + { + "confidence": 1.0, + "dst": "thr_ember_tide_watch", + "rel": 
"authored_thread", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "event_ember_tide", + "rel": "discusses", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "references", + "src": "thr_ember_tide_watch" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "operates_in", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "operates_in", + "src": "org_emberline_security" + }, + { + "confidence": 0.86, + "dst": "user_soren", + "rel": "connected_to", + "src": "user_rhea" + }, + { + "confidence": 0.86, + "dst": "user_tara", + "rel": "connected_to", + "src": "user_soren" + }, + { + "confidence": 0.87, + "dst": "user_rhea", + "rel": "connected_to", + "src": "user_nora" + }, + { + "confidence": 0.9, + "dst": "event_ember_tide", + "rel": "collaborates_on", + "src": "user_rhea" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "collaborates_on", + "src": "user_soren" + }, + { + "confidence": 0.9, + "dst": "event_ghost_signal", + "rel": "reports_on", + "src": "user_tara" + }, + { + "confidence": 0.77, + 
"dst": "event_ghost_signal", + "rel": "connected_to", + "src": "event_ember_tide" + }, + { + "confidence": 0.77, + "dst": "org_tidewatch_ops", + "rel": "connected_to", + "src": "org_harborlight_transit" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + 
"rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + } + ], + "task_id": "fixed_task_28", + "task_type": "fixed_trace" + }, + { + "answer": "user_priya", + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_10", + "support_nodes": 55 + }, + "question": "End-to-end benchmark sweep: across Lantern, Black Kite, Glass Harbor, Iron Wharf, Ember Tide, and Ghost Signal, which user behind alias_hollowsignal anchors the Sunmesh monitoring side?", + "supporting_edges": [ + { + "confidence": 1.0, + "dst": "user_ivy", + "rel": "alias_of", + "src": "alias_orchidfox" + }, + { + "confidence": 1.0, + "dst": "user_bharat", + "rel": "alias_of", + "src": "alias_steelquill" + }, + { + "confidence": 1.0, + "dst": "user_diya", + "rel": "alias_of", + "src": "alias_monsoonbyte" + }, + { + "confidence": 1.0, + "dst": "user_faris", + "rel": "alias_of", + "src": "alias_nightrelay" + }, + { + "confidence": 1.0, + "dst": "user_elin", + "rel": "alias_of", + "src": "alias_mapleghost" + }, + { + "confidence": 1.0, + "dst": "user_hiro", + "rel": "alias_of", + "src": "alias_docksparrow" + }, + { + "confidence": 1.0, + "dst": "user_cyrus", + "rel": "alias_of", + "src": "alias_quartzlotus" + }, + { + "confidence": 1.0, + "dst": "user_nora", + "rel": "alias_of", + "src": "alias_emberglass" + }, + { + "confidence": 1.0, + "dst": "user_mika", + "rel": "alias_of", + "src": "alias_basinraven" + }, + { + "confidence": 1.0, + "dst": "user_soren", + "rel": "alias_of", + "src": "alias_tideshard" + }, + { + "confidence": 1.0, + "dst": "user_priya", + "rel": "alias_of", + "src": "alias_hollowsignal" + }, + { + "confidence": 1.0, + "dst": "user_omar", + "rel": "alias_of", + "src": "alias_ironwhisper" + }, + { + "confidence": 1.0, + "dst": "user_rhea", + "rel": "alias_of", + "src": "alias_cinderveil" + }, + { + "confidence": 1.0, + "dst": "user_tara", + "rel": "alias_of", + "src": 
"alias_sablekeel" + }, + { + "confidence": 1.0, + "dst": "user_kian", + "rel": "alias_of", + "src": "alias_lanternmoth" + }, + { + "confidence": 1.0, + "dst": "user_leena", + "rel": "alias_of", + "src": "alias_frostledger" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_aria" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_bharat" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_cyrus" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_diya" + }, + { + "confidence": 1.0, + "dst": "org_helios_labs", + "rel": "works_at", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_elin" + }, + { + "confidence": 1.0, + "dst": "org_tidewatch_ops", + "rel": "works_at", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "loc_rivergate", + "rel": "located_in", + "src": "user_faris" + }, + { + "confidence": 1.0, + "dst": "org_apex_dynamics", + "rel": "works_at", + "src": "user_gita" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_gita" + }, + { + "confidence": 1.0, + "dst": "org_northbridge_logistics", + "rel": "works_at", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "located_in", + "src": "user_hiro" + }, + { + "confidence": 1.0, + "dst": "org_kestrel_works", + "rel": "works_at", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": 
"loc_rivergate", + "rel": "located_in", + "src": "user_ivy" + }, + { + "confidence": 1.0, + "dst": "org_blueharbor_media", + "rel": "works_at", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "loc_old_town", + "rel": "located_in", + "src": "user_jules" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_kian" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_leena" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_mika" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_nora" + }, + { + "confidence": 1.0, + "dst": "org_atlas_freight", + "rel": "works_at", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "loc_east_quay", + "rel": "located_in", + "src": "user_omar" + }, + { + "confidence": 1.0, + "dst": "org_sunmesh_analytics", + "rel": "works_at", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "located_in", + "src": "user_priya" + }, + { + "confidence": 1.0, + "dst": "org_orion_customs", + "rel": "works_at", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "loc_north_basin", + "rel": "located_in", + "src": "user_quinn" + }, + { + "confidence": 1.0, + "dst": "org_emberline_security", + "rel": "works_at", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "loc_foundry_row", + "rel": "located_in", + "src": "user_rhea" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + 
"src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_soren" + }, + { + "confidence": 1.0, + "dst": "org_harborlight_transit", + "rel": "works_at", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_uplink_yard", + "rel": "located_in", + "src": "user_tara" + }, + { + "confidence": 1.0, + "dst": "loc_sector9", + "rel": "operates_in", + "src": "org_helios_labs" + }, + { + "confidence": 1.0, + "dst": "loc_dockyard17", + "rel": "operates_in", + "src": "org_northbridge_logistics" + } + ], + "task_id": "fixed_task_29", + "task_type": "fixed_trace" + } + ], + "source_seed": "datasets\\fixed_levels\\seed_fixed_levels.json" +} \ No newline at end of file diff --git a/datasets/fixed_levels/leaderboard_fixed_levels.json b/datasets/fixed_levels/leaderboard_fixed_levels.json new file mode 100644 index 0000000000000000000000000000000000000000..bd22901c4b276b7f4db5cd3604de2393496fa228 --- /dev/null +++ b/datasets/fixed_levels/leaderboard_fixed_levels.json @@ -0,0 +1,1401 @@ +[ + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 20, + "max_width": 2, + "seed": 2026, + "seeded_questions": 15, + "swarm_enabled": true + }, + "created_at": "2026-04-01T18:48:39+00:00", + "episodes": 15, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.16666666666666666, + "avg_connectivity_reward": 0.16999999999999998, + "avg_diversity_reward": 0.1157777777777778, + "avg_entity_informativeness_reward": -0.08858065677817137, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.8492063492063492, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.052000000000000005, + "avg_relation_informativeness_reward": 0.07135858524047924, + "avg_reward": 4.197526826881651, + "avg_soft_shaping_reward": 0.24999999999999994, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + 
"deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8543934355282199, + "retrieval_signal": 0.6932, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5730889190257948, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0001", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-02T09:16:05+00:00", + "episodes": 30, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2000000000000001, + "avg_connectivity_reward": 0.12999999999999998, + "avg_diversity_reward": 0.12433333333333325, + "avg_entity_informativeness_reward": 0.000700571890338102, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.2916528337385394, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.05070078042510192, + "avg_relation_informativeness_reward": 0.07853375358885142, + "avg_reward": 4.377456514967488, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6241912131110795, + "retrieval_signal": 0.6927452731487858, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5869968650958378, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0002", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-03T13:22:03+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.20000000000000004, + "avg_connectivity_reward": -0.06666666666666667, + 
"avg_diversity_reward": 0.13444444444444445, + "avg_entity_informativeness_reward": -0.01010882862863417, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5793650793650794, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.10372960372960373, + "avg_relation_informativeness_reward": 0.07108687894082726, + "avg_reward": 4.419313576918165, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6797400780463063, + "retrieval_signal": 0.7113053613053614, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5356956100624386, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0003", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T18:29:39+00:00", + "episodes": 30, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2000000000000001, + "avg_connectivity_reward": 0.12999999999999998, + "avg_diversity_reward": 0.12433333333333325, + "avg_entity_informativeness_reward": -0.02515191749984708, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.2916528337385394, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.11539120363588044, + "avg_relation_informativeness_reward": 0.0769903534735767, + "avg_reward": 4.460667345528021, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6269168609961595, + "retrieval_signal": 0.7153869212725582, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5815176871947458, + "task_success_rate": 
1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0004", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T18:33:06+00:00", + "episodes": 2, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2, + "avg_connectivity_reward": -0.15, + "avg_diversity_reward": 0.13833333333333334, + "avg_entity_informativeness_reward": -0.026628229842114173, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.6190476190476191, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.10681818181818181, + "avg_relation_informativeness_reward": 0.048120982127120335, + "avg_reward": 4.334953339016039, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.685242999396977, + "retrieval_signal": 0.7123863636363637, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5075485504570012, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0005", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 1, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T18:54:52+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.1, + "avg_connectivity_reward": -0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": -0.02450859227728558, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.33333333333333337, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.08181818181818182, + "avg_relation_informativeness_reward": 
0.04353540016904645, + "avg_reward": 3.037246438342494, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 2.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 5.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6201263424948862, + "retrieval_signal": 0.7036363636363637, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.45080536157835216, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0006", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 1, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T19:22:57+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.1, + "avg_connectivity_reward": -0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": -0.005263146336646693, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.33333333333333337, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.08181818181818182, + "avg_relation_informativeness_reward": 0.044276243254877785, + "avg_reward": 3.057232727368964, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 2.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 5.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6205293479318178, + "retrieval_signal": 0.7036363636363637, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.4548026193836462, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0007", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "qwen3:1.7b", + "llm_provider": "ollama", + "max_agents": 1, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + 
"created_at": "2026-04-06T19:48:33+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.10000000000000002, + "avg_connectivity_reward": -0.09999999999999999, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": -0.028683816517602444, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.15537340619307835, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.07932190760059611, + "avg_relation_informativeness_reward": 0.044225025032092045, + "avg_reward": 3.1324990406542437, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 2.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 5.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.5890485416309927, + "retrieval_signal": 0.7027626676602087, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5001082417028979, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0008", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "qwen3:1.7b", + "llm_provider": "ollama", + "max_agents": 1, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T19:55:08+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.1, + "avg_connectivity_reward": -0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": -0.005263146336646693, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.33333333333333337, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.08181818181818182, + "avg_relation_informativeness_reward": 0.04406984773661544, + "avg_reward": 3.0570263318507016, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 2.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 5.0, + "deanonymization_accuracy": 0.0, 
+ "leaderboard_score": 0.6205251901591228, + "retrieval_signal": 0.7036363636363637, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.45476134027999376, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0009", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "qwen3:1.7b", + "llm_provider": "ollama", + "max_agents": 1, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T20:01:34+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.1, + "avg_connectivity_reward": -0.3, + "avg_diversity_reward": 0.08, + "avg_entity_informativeness_reward": -0.020826953461399098, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.33333333333333337, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.08181818181818182, + "avg_relation_informativeness_reward": 0.04348043923536236, + "avg_reward": 3.040873116224696, + "avg_soft_shaping_reward": 0.15, + "avg_spawn_count": 2.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 5.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6201995296517067, + "retrieval_signal": 0.7036363636363637, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.45153069715479266, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0010", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T20:46:11+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2, + "avg_connectivity_reward": -0.15, + "avg_diversity_reward": 
0.12666666666666665, + "avg_entity_informativeness_reward": 0.019629386278697845, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5714285714285715, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.12272727272727273, + "avg_relation_informativeness_reward": 0.08347928023822283, + "avg_reward": 1.829702015111513, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6715432845394145, + "retrieval_signal": 0.7179545454545455, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5221217333033842, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0011", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T20:49:44+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2, + "avg_connectivity_reward": -0.15, + "avg_diversity_reward": 0.12666666666666665, + "avg_entity_informativeness_reward": 0.019629386278697845, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5714285714285715, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.12272727272727273, + "avg_relation_informativeness_reward": 0.08335372627068136, + "avg_reward": 0.7139904233885594, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6641542345113342, + "retrieval_signal": 0.7179545454545455, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5220966225098759, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": 
"run_0012", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-06T20:59:43+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2, + "avg_connectivity_reward": -0.15, + "avg_diversity_reward": 0.12666666666666665, + "avg_entity_informativeness_reward": 0.0036675120354726642, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5714285714285715, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.12272727272727273, + "avg_relation_informativeness_reward": 0.08250745620050208, + "avg_reward": 0.7138056720677886, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6638424503476543, + "retrieval_signal": 0.7179545454545455, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.518734993647195, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0013", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T09:44:40+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2, + "avg_connectivity_reward": -0.15, + "avg_diversity_reward": 0.12666666666666665, + "avg_entity_informativeness_reward": -0.018704290877944903, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5714285714285715, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.12272727272727273, + 
"avg_relation_informativeness_reward": 0.08056039127695382, + "avg_reward": 0.7135379106634446, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6633913226563717, + "retrieval_signal": 0.7179545454545455, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5138712200798018, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0014", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T09:55:19+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2, + "avg_connectivity_reward": -0.15, + "avg_diversity_reward": 0.12666666666666665, + "avg_entity_informativeness_reward": -0.018704290877944903, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5714285714285715, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.12272727272727273, + "avg_relation_informativeness_reward": 0.08056039127695382, + "avg_reward": 0.7135379106634446, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6633913226563717, + "retrieval_signal": 0.7179545454545455, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5138712200798018, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0015", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, 
+ "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T09:56:28+00:00", + "episodes": 30, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2000000000000001, + "avg_connectivity_reward": 0.12999999999999998, + "avg_diversity_reward": 0.12433333333333325, + "avg_entity_informativeness_reward": -0.02515191749984708, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.2916528337385394, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.11539120363588044, + "avg_relation_informativeness_reward": 0.0769903534735767, + "avg_reward": 0.7150555461096118, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6132407715455404, + "retrieval_signal": 0.7153869212725582, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5815176871947458, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0016", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T10:02:32+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.2, + "avg_connectivity_reward": -0.15, + "avg_diversity_reward": 0.12666666666666665, + "avg_entity_informativeness_reward": -0.018704290877944903, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5714285714285715, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.12272727272727273, + "avg_relation_informativeness_reward": 0.08056039127695382, + "avg_reward": 0.7135379106634446, + 
"avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6633913226563717, + "retrieval_signal": 0.7179545454545455, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5138712200798018, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0017", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T10:02:49+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.20000000000000004, + "avg_connectivity_reward": -0.06666666666666667, + "avg_diversity_reward": 0.13444444444444445, + "avg_entity_informativeness_reward": -0.029992009599206938, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5793650793650794, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.10372960372960373, + "avg_relation_informativeness_reward": 0.06898843512226, + "avg_reward": 0.7133699465240085, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6656078661080486, + "retrieval_signal": 0.7113053613053614, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5312992851046106, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0018", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + 
"seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T10:04:53+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.20000000000000004, + "avg_connectivity_reward": -0.06666666666666667, + "avg_diversity_reward": 0.13444444444444445, + "avg_entity_informativeness_reward": -0.029992009599206938, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5793650793650794, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.10372960372960373, + "avg_relation_informativeness_reward": 0.06898843512226, + "avg_reward": 0.7133699465240085, + "avg_soft_shaping_reward": 0.3, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6656078661080486, + "retrieval_signal": 0.7113053613053614, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5312992851046106, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0019", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T10:11:34+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.20000000000000004, + "avg_connectivity_reward": -0.06666666666666667, + "avg_diversity_reward": 0.13444444444444445, + "avg_entity_informativeness_reward": -0.029992009599206938, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.5793650793650794, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.10372960372960373, + "avg_relation_informativeness_reward": 0.06898843512226, + "avg_reward": 0.7133699465240085, + "avg_soft_shaping_reward": 0.3, + 
"avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.6656078661080486, + "retrieval_signal": 0.7113053613053614, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5312992851046106, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0020", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T10:29:54+00:00", + "episodes": 3, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.0, + "avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.0, + "avg_knowledge_carrier_reward": 0.0, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.5519400198339021, + "avg_soft_shaping_reward": 0.0, + "avg_spawn_count": 0.0, + "avg_spawn_critical_steps": 0.0, + "avg_steps_to_solution": 1.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.2785970009916951, + "retrieval_signal": 0.5, + "spawn_completion_rate": 0.0, + "spawn_signal": 0.4, + "structural_signal": 0.5, + "task_success_rate": 0.0, + "tool_efficiency": 1.0 + }, + "run_id": "run_0021", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-07T15:59:20+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.0, + 
"avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.0, + "avg_knowledge_carrier_reward": 0.0, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.5519400198339021, + "avg_soft_shaping_reward": 0.0, + "avg_spawn_count": 0.0, + "avg_spawn_critical_steps": 0.0, + "avg_steps_to_solution": 1.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.2785970009916951, + "retrieval_signal": 0.5, + "spawn_completion_rate": 0.0, + "spawn_signal": 0.4, + "structural_signal": 0.5, + "task_success_rate": 0.0, + "tool_efficiency": 1.0 + }, + "run_id": "run_0022", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T04:25:00+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.0, + "avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.0, + "avg_knowledge_carrier_reward": 0.0, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.5519400198339021, + "avg_soft_shaping_reward": 0.0, + "avg_spawn_count": 0.0, + "avg_spawn_critical_steps": 0.0, + "avg_steps_to_solution": 1.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.2785970009916951, + "retrieval_signal": 0.5, + "spawn_completion_rate": 0.0, + "spawn_signal": 0.4, + "structural_signal": 0.5, + "task_success_rate": 0.0, + "tool_efficiency": 1.0 + }, + "run_id": "run_0023", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + 
"max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T04:28:07+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.0, + "avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.0, + "avg_knowledge_carrier_reward": 0.0, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.5519400198339021, + "avg_soft_shaping_reward": 0.0, + "avg_spawn_count": 0.0, + "avg_spawn_critical_steps": 0.0, + "avg_steps_to_solution": 1.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.2785970009916951, + "retrieval_signal": 0.5, + "spawn_completion_rate": 0.0, + "spawn_signal": 0.4, + "structural_signal": 0.5, + "task_success_rate": 0.0, + "tool_efficiency": 1.0 + }, + "run_id": "run_0024", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T04:39:32+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.0, + "avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.0, + "avg_knowledge_carrier_reward": 0.0, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.5519400198339021, + "avg_soft_shaping_reward": 0.0, + "avg_spawn_count": 0.0, + "avg_spawn_critical_steps": 0.0, + "avg_steps_to_solution": 1.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.2785970009916951, + 
"retrieval_signal": 0.5, + "spawn_completion_rate": 0.0, + "spawn_signal": 0.4, + "structural_signal": 0.5, + "task_success_rate": 0.0, + "tool_efficiency": 1.0 + }, + "run_id": "run_0025", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T04:40:21+00:00", + "episodes": 30, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.13333333333333336, + "avg_connectivity_reward": 0.09999999999999999, + "avg_diversity_reward": 0.03911111111111111, + "avg_entity_informativeness_reward": -0.00951758755541623, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.08482743691314255, + "avg_knowledge_carrier_reward": 0.3333333333333333, + "avg_knowledge_indexing_reward": 0.0832325289772058, + "avg_relation_informativeness_reward": 0.024842289016879314, + "avg_reward": 0.6636425017249088, + "avg_soft_shaping_reward": 0.19999999999999993, + "avg_spawn_count": 2.6666666666666665, + "avg_spawn_critical_steps": 4.0, + "avg_steps_to_solution": 6.333333333333333, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.4644798510150634, + "retrieval_signal": 0.6457980518086888, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.7, + "structural_signal": 0.5472649402922927, + "task_success_rate": 0.6666666666666666, + "tool_efficiency": 0.5 + }, + "run_id": "run_0026", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T05:01:16+00:00", + "episodes": 10, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 
0.0, + "avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.0, + "avg_knowledge_carrier_reward": 0.0, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.5519400198339021, + "avg_soft_shaping_reward": 0.0, + "avg_spawn_count": 0.0, + "avg_spawn_critical_steps": 0.0, + "avg_steps_to_solution": 1.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.2785970009916951, + "retrieval_signal": 0.5, + "spawn_completion_rate": 0.0, + "spawn_signal": 0.4, + "structural_signal": 0.5, + "task_success_rate": 0.0, + "tool_efficiency": 1.0 + }, + "run_id": "run_0027", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T05:01:29+00:00", + "episodes": 10, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.19999999999999998, + "avg_connectivity_reward": 0.06, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.18535980927285275, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.09575879120879122, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.7109638031154166, + "avg_soft_shaping_reward": 0.29999999999999993, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.5866289994462388, + "retrieval_signal": 0.708515576923077, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.535, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": 
"run_0028", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T05:01:43+00:00", + "episodes": 10, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.19999999999999998, + "avg_connectivity_reward": 0.24, + "avg_diversity_reward": 0.11733333333333333, + "avg_entity_informativeness_reward": -0.028552762666248687, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.06912250146657492, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.15393879572282626, + "avg_relation_informativeness_reward": 0.07452686705063795, + "avg_reward": 0.7171006884027153, + "avg_soft_shaping_reward": 0.29999999999999993, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.5730007362494549, + "retrieval_signal": 0.7288785785029892, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.6067948208768779, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0029", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T15:57:03+00:00", + "episodes": 10, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.0, + "avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.0, + "avg_knowledge_carrier_reward": 0.0, + 
"avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.5519400198339021, + "avg_soft_shaping_reward": 0.0, + "avg_spawn_count": 0.0, + "avg_spawn_critical_steps": 0.0, + "avg_steps_to_solution": 1.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.2785970009916951, + "retrieval_signal": 0.5, + "spawn_completion_rate": 0.0, + "spawn_signal": 0.4, + "structural_signal": 0.5, + "task_success_rate": 0.0, + "tool_efficiency": 1.0 + }, + "run_id": "run_0030", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, + "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T15:57:18+00:00", + "episodes": 10, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.19999999999999998, + "avg_connectivity_reward": 0.06, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.18535980927285275, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.09575879120879122, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.7109638031154166, + "avg_soft_shaping_reward": 0.29999999999999993, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.5866289994462388, + "retrieval_signal": 0.708515576923077, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.535, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0031", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "gpt-5.4-mini", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 24, + "max_width": 2, 
+ "seed": 2026, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-08T15:57:32+00:00", + "episodes": 10, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.19999999999999998, + "avg_connectivity_reward": 0.24, + "avg_diversity_reward": 0.11733333333333333, + "avg_entity_informativeness_reward": -0.028552762666248687, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.06912250146657492, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.15393879572282626, + "avg_relation_informativeness_reward": 0.07452686705063795, + "avg_reward": 0.7171006884027153, + "avg_soft_shaping_reward": 0.29999999999999993, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 0.0, + "leaderboard_score": 0.5730007362494549, + "retrieval_signal": 0.7288785785029892, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.6067948208768779, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0032", + "run_name": "fixed_levels_qwen_swarm" + }, + { + "config": { + "llm_model": "qwen3:8b", + "llm_provider": "openai", + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 18, + "max_width": 2, + "seed": 7, + "seeded_questions": 30, + "swarm_enabled": true + }, + "created_at": "2026-04-20T19:46:04+00:00", + "episodes": 1, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.0, + "avg_connectivity_reward": 0.0, + "avg_diversity_reward": 0.0, + "avg_entity_informativeness_reward": 0.0, + "avg_format_reward": 0.15, + "avg_graph_f1": 0.0, + "avg_knowledge_carrier_reward": 0.0, + "avg_knowledge_indexing_reward": 0.0, + "avg_relation_informativeness_reward": 0.0, + "avg_reward": 0.5519400198339021, + "avg_soft_shaping_reward": 0.0, + "avg_spawn_count": 0.0, + "avg_spawn_critical_steps": 0.0, + "avg_steps_to_solution": 1.0, + 
"deanonymization_accuracy": 0.0, + "leaderboard_score": 0.2785970009916951, + "retrieval_signal": 0.5, + "spawn_completion_rate": 0.0, + "spawn_signal": 0.4, + "structural_signal": 0.5, + "task_success_rate": 0.0, + "tool_efficiency": 1.0 + }, + "run_id": "run_0033", + "run_name": "fixed_levels_qwen_swarm" + } +] \ No newline at end of file diff --git a/datasets/fixed_levels/qwen_swarm_benchmark_fixed_levels.json b/datasets/fixed_levels/qwen_swarm_benchmark_fixed_levels.json new file mode 100644 index 0000000000000000000000000000000000000000..4052238522ffed2fd9e825b4e35aa3f8b4191250 --- /dev/null +++ b/datasets/fixed_levels/qwen_swarm_benchmark_fixed_levels.json @@ -0,0 +1,69 @@ +{ + "dashboard": "datasets/fixed_levels/dashboard_fixed_levels.html", + "record": { + "config": { + "max_agents": 3, + "max_breadth": 2, + "max_depth": 2, + "max_steps": 20, + "max_width": 2, + "seed": 2026, + "seeded_questions": 15, + "swarm_enabled": true + }, + "created_at": "2026-04-01T18:48:39+00:00", + "episodes": 15, + "metrics": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.16666666666666666, + "avg_connectivity_reward": 0.16999999999999998, + "avg_diversity_reward": 0.1157777777777778, + "avg_entity_informativeness_reward": -0.08858065677817137, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.8492063492063492, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.052000000000000005, + "avg_relation_informativeness_reward": 0.07135858524047924, + "avg_reward": 4.197526826881651, + "avg_soft_shaping_reward": 0.24999999999999994, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8543934355282199, + "retrieval_signal": 0.6932, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5730889190257948, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + }, + "run_id": "run_0001", + 
"run_name": "fixed_levels_qwen_swarm" + }, + "summary": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.16666666666666666, + "avg_connectivity_reward": 0.16999999999999998, + "avg_diversity_reward": 0.1157777777777778, + "avg_entity_informativeness_reward": -0.08858065677817137, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.8492063492063492, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.052000000000000005, + "avg_relation_informativeness_reward": 0.07135858524047924, + "avg_reward": 4.197526826881651, + "avg_soft_shaping_reward": 0.24999999999999994, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8543934355282199, + "retrieval_signal": 0.6932, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5730889190257948, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + } +} diff --git a/datasets/fixed_levels/qwen_swarm_eval_by_difficulty.json b/datasets/fixed_levels/qwen_swarm_eval_by_difficulty.json new file mode 100644 index 0000000000000000000000000000000000000000..1b77dc716eabbad385f7eca94371c2aac4b850a8 --- /dev/null +++ b/datasets/fixed_levels/qwen_swarm_eval_by_difficulty.json @@ -0,0 +1,53 @@ +{ + "by_difficulty": { + "easy": { + "avg_graph_f1": 1.0, + "avg_reward": 3.610490808845623, + "avg_steps": 9.0, + "avg_tool_calls": 4.0, + "episodes": 5, + "task_success_rate": 1.0 + }, + "high": { + "avg_graph_f1": 0.5476190476190477, + "avg_reward": 4.207102815893519, + "avg_steps": 9.0, + "avg_tool_calls": 4.0, + "episodes": 5, + "task_success_rate": 1.0 + }, + "mid": { + "avg_graph_f1": 1.0, + "avg_reward": 4.822687547070801, + "avg_steps": 9.0, + "avg_tool_calls": 4.0, + "episodes": 5, + "task_success_rate": 1.0 + } + }, + "overall": { + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.16666666666666666, + 
"avg_connectivity_reward": 0.16999999999999998, + "avg_diversity_reward": 0.1157777777777778, + "avg_entity_informativeness_reward": -0.07289878447762359, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.8492063492063492, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.052000000000000005, + "avg_relation_informativeness_reward": 0.07157694332826091, + "avg_reward": 4.213427057269981, + "avg_soft_shaping_reward": 0.24999999999999994, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8546911504342771, + "retrieval_signal": 0.6932, + "spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5762689651034608, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 + } +} \ No newline at end of file diff --git a/datasets/fixed_levels/qwen_swarm_eval_fixed_levels.json b/datasets/fixed_levels/qwen_swarm_eval_fixed_levels.json new file mode 100644 index 0000000000000000000000000000000000000000..a467bedc91c9276db4c393c4b633b8150ee71baf --- /dev/null +++ b/datasets/fixed_levels/qwen_swarm_eval_fixed_levels.json @@ -0,0 +1,25 @@ +{ + "avg_compactness_reward": 0.0, + "avg_connectivity_gain_reward": 0.16666666666666666, + "avg_connectivity_reward": 0.16999999999999998, + "avg_diversity_reward": 0.1157777777777778, + "avg_entity_informativeness_reward": -0.02824631570420193, + "avg_format_reward": 0.14999999999999997, + "avg_graph_f1": 0.8492063492063492, + "avg_knowledge_carrier_reward": 0.5, + "avg_knowledge_indexing_reward": 0.07400000000000001, + "avg_relation_informativeness_reward": 0.06905976285357758, + "avg_reward": 4.285384567790942, + "avg_soft_shaping_reward": 0.24999999999999994, + "avg_spawn_count": 4.0, + "avg_spawn_critical_steps": 6.0, + "avg_steps_to_solution": 9.0, + "deanonymization_accuracy": 1.0, + "leaderboard_score": 0.8565775118852701, + "retrieval_signal": 0.7009000000000001, + 
"spawn_completion_rate": 1.0, + "spawn_signal": 0.6666666666666666, + "structural_signal": 0.5846960227632085, + "task_success_rate": 1.0, + "tool_efficiency": 0.5 +} diff --git a/datasets/fixed_levels/seed_fixed_levels.json b/datasets/fixed_levels/seed_fixed_levels.json new file mode 100644 index 0000000000000000000000000000000000000000..9424b00b5f3efbc1cffcde87366ac70b23750ea2 --- /dev/null +++ b/datasets/fixed_levels/seed_fixed_levels.json @@ -0,0 +1,6977 @@ +{ + "seeding": { + "seeded_nodes": [ + { + "node_id": "user_aria", + "node_type": "user", + "attrs": { + "name": "Aria Sen", + "org": "Helios Labs", + "location": "Sector 9" + } + }, + { + "node_id": "user_bharat", + "node_type": "user", + "attrs": { + "name": "Bharat Kulkarni", + "org": "Northbridge Logistics", + "location": "Dockyard 17" + } + }, + { + "node_id": "user_cyrus", + "node_type": "user", + "attrs": { + "name": "Cyrus Mehta", + "org": "Apex Dynamics", + "location": "Old Town" + } + }, + { + "node_id": "user_diya", + "node_type": "user", + "attrs": { + "name": "Diya Roy", + "org": "Blueharbor Media", + "location": "Old Town" + } + }, + { + "node_id": "user_elin", + "node_type": "user", + "attrs": { + "name": "Elin Das", + "org": "Helios Labs", + "location": "Sector 9" + } + }, + { + "node_id": "user_faris", + "node_type": "user", + "attrs": { + "name": "Faris Noor", + "org": "Tidewatch Ops", + "location": "Rivergate" + } + }, + { + "node_id": "user_gita", + "node_type": "user", + "attrs": { + "name": "Gita Pradhan", + "org": "Apex Dynamics", + "location": "Old Town" + } + }, + { + "node_id": "user_hiro", + "node_type": "user", + "attrs": { + "name": "Hiro Tan", + "org": "Northbridge Logistics", + "location": "Dockyard 17" + } + }, + { + "node_id": "user_ivy", + "node_type": "user", + "attrs": { + "name": "Ivy Kapoor", + "org": "Kestrel Works", + "location": "Rivergate" + } + }, + { + "node_id": "user_jules", + "node_type": "user", + "attrs": { + "name": "Jules Banerjee", + "org": "Blueharbor 
Media", + "location": "Old Town" + } + }, + { + "node_id": "user_kian", + "node_type": "user", + "attrs": { + "name": "Kian Bose", + "org": "Atlas Freight", + "location": "East Quay" + } + }, + { + "node_id": "user_leena", + "node_type": "user", + "attrs": { + "name": "Leena Das", + "org": "Sunmesh Analytics", + "location": "Sector 9" + } + }, + { + "node_id": "user_mika", + "node_type": "user", + "attrs": { + "name": "Mika Solanki", + "org": "Orion Customs", + "location": "North Basin" + } + }, + { + "node_id": "user_nora", + "node_type": "user", + "attrs": { + "name": "Nora Iqbal", + "org": "Emberline Security", + "location": "Foundry Row" + } + }, + { + "node_id": "user_omar", + "node_type": "user", + "attrs": { + "name": "Omar Sheikh", + "org": "Atlas Freight", + "location": "East Quay" + } + }, + { + "node_id": "user_priya", + "node_type": "user", + "attrs": { + "name": "Priya Menon", + "org": "Sunmesh Analytics", + "location": "Sector 9" + } + }, + { + "node_id": "user_quinn", + "node_type": "user", + "attrs": { + "name": "Quinn Rao", + "org": "Orion Customs", + "location": "North Basin" + } + }, + { + "node_id": "user_rhea", + "node_type": "user", + "attrs": { + "name": "Rhea Kapoor", + "org": "Emberline Security", + "location": "Foundry Row" + } + }, + { + "node_id": "user_soren", + "node_type": "user", + "attrs": { + "name": "Soren Malik", + "org": "Harborlight Transit", + "location": "Uplink Yard" + } + }, + { + "node_id": "user_tara", + "node_type": "user", + "attrs": { + "name": "Tara Dey", + "org": "Harborlight Transit", + "location": "Uplink Yard" + } + }, + { + "node_id": "alias_orchidfox", + "node_type": "alias", + "attrs": { + "handle": "@orchidfox" + } + }, + { + "node_id": "alias_steelquill", + "node_type": "alias", + "attrs": { + "handle": "@steelquill" + } + }, + { + "node_id": "alias_monsoonbyte", + "node_type": "alias", + "attrs": { + "handle": "@monsoonbyte" + } + }, + { + "node_id": "alias_nightrelay", + "node_type": "alias", + "attrs": { + 
"handle": "@nightrelay" + } + }, + { + "node_id": "alias_mapleghost", + "node_type": "alias", + "attrs": { + "handle": "@mapleghost" + } + }, + { + "node_id": "alias_docksparrow", + "node_type": "alias", + "attrs": { + "handle": "@docksparrow" + } + }, + { + "node_id": "alias_quartzlotus", + "node_type": "alias", + "attrs": { + "handle": "@quartzlotus" + } + }, + { + "node_id": "alias_emberglass", + "node_type": "alias", + "attrs": { + "handle": "@emberglass" + } + }, + { + "node_id": "alias_basinraven", + "node_type": "alias", + "attrs": { + "handle": "@basinraven" + } + }, + { + "node_id": "alias_tideshard", + "node_type": "alias", + "attrs": { + "handle": "@tideshard" + } + }, + { + "node_id": "alias_hollowsignal", + "node_type": "alias", + "attrs": { + "handle": "@hollowsignal" + } + }, + { + "node_id": "alias_ironwhisper", + "node_type": "alias", + "attrs": { + "handle": "@ironwhisper" + } + }, + { + "node_id": "alias_cinderveil", + "node_type": "alias", + "attrs": { + "handle": "@cinderveil" + } + }, + { + "node_id": "alias_sablekeel", + "node_type": "alias", + "attrs": { + "handle": "@sablekeel" + } + }, + { + "node_id": "alias_lanternmoth", + "node_type": "alias", + "attrs": { + "handle": "@lanternmoth" + } + }, + { + "node_id": "alias_frostledger", + "node_type": "alias", + "attrs": { + "handle": "@frostledger" + } + }, + { + "node_id": "org_helios_labs", + "node_type": "org", + "attrs": { + "name": "Helios Labs" + } + }, + { + "node_id": "org_northbridge_logistics", + "node_type": "org", + "attrs": { + "name": "Northbridge Logistics" + } + }, + { + "node_id": "org_apex_dynamics", + "node_type": "org", + "attrs": { + "name": "Apex Dynamics" + } + }, + { + "node_id": "org_blueharbor_media", + "node_type": "org", + "attrs": { + "name": "Blueharbor Media" + } + }, + { + "node_id": "org_tidewatch_ops", + "node_type": "org", + "attrs": { + "name": "Tidewatch Ops" + } + }, + { + "node_id": "org_kestrel_works", + "node_type": "org", + "attrs": { + "name": 
"Kestrel Works" + } + }, + { + "node_id": "org_atlas_freight", + "node_type": "org", + "attrs": { + "name": "Atlas Freight" + } + }, + { + "node_id": "org_sunmesh_analytics", + "node_type": "org", + "attrs": { + "name": "Sunmesh Analytics" + } + }, + { + "node_id": "org_orion_customs", + "node_type": "org", + "attrs": { + "name": "Orion Customs" + } + }, + { + "node_id": "org_emberline_security", + "node_type": "org", + "attrs": { + "name": "Emberline Security" + } + }, + { + "node_id": "org_harborlight_transit", + "node_type": "org", + "attrs": { + "name": "Harborlight Transit" + } + }, + { + "node_id": "loc_dockyard17", + "node_type": "location", + "attrs": { + "name": "Dockyard 17" + } + }, + { + "node_id": "loc_sector9", + "node_type": "location", + "attrs": { + "name": "Sector 9" + } + }, + { + "node_id": "loc_old_town", + "node_type": "location", + "attrs": { + "name": "Old Town" + } + }, + { + "node_id": "loc_rivergate", + "node_type": "location", + "attrs": { + "name": "Rivergate" + } + }, + { + "node_id": "loc_east_quay", + "node_type": "location", + "attrs": { + "name": "East Quay" + } + }, + { + "node_id": "loc_foundry_row", + "node_type": "location", + "attrs": { + "name": "Foundry Row" + } + }, + { + "node_id": "loc_north_basin", + "node_type": "location", + "attrs": { + "name": "North Basin" + } + }, + { + "node_id": "loc_uplink_yard", + "node_type": "location", + "attrs": { + "name": "Uplink Yard" + } + }, + { + "node_id": "event_project_lantern", + "node_type": "event", + "attrs": { + "name": "Project Lantern" + } + }, + { + "node_id": "event_black_kite", + "node_type": "event", + "attrs": { + "name": "Black Kite" + } + }, + { + "node_id": "event_silent_current", + "node_type": "event", + "attrs": { + "name": "Silent Current" + } + }, + { + "node_id": "event_amber_veil", + "node_type": "event", + "attrs": { + "name": "Amber Veil" + } + }, + { + "node_id": "event_glass_harbor", + "node_type": "event", + "attrs": { + "name": "Glass Harbor" + } + }, + 
{ + "node_id": "event_ember_tide", + "node_type": "event", + "attrs": { + "name": "Ember Tide" + } + }, + { + "node_id": "event_iron_wharf", + "node_type": "event", + "attrs": { + "name": "Iron Wharf" + } + }, + { + "node_id": "event_ghost_signal", + "node_type": "event", + "attrs": { + "name": "Ghost Signal" + } + }, + { + "node_id": "thr_supply_leak", + "node_type": "thread", + "attrs": { + "topic": "supply_chain" + } + }, + { + "node_id": "thr_port_audit", + "node_type": "thread", + "attrs": { + "topic": "port_audit" + } + }, + { + "node_id": "thr_customs_breach", + "node_type": "thread", + "attrs": { + "topic": "customs_breach" + } + }, + { + "node_id": "thr_relay_map", + "node_type": "thread", + "attrs": { + "topic": "relay_map" + } + }, + { + "node_id": "thr_foundry_watch", + "node_type": "thread", + "attrs": { + "topic": "foundry_watch" + } + }, + { + "node_id": "thr_basin_shift", + "node_type": "thread", + "attrs": { + "topic": "basin_shift" + } + }, + { + "node_id": "thr_quiet_manifest", + "node_type": "thread", + "attrs": { + "topic": "quiet_manifest" + } + }, + { + "node_id": "thr_uplink_route", + "node_type": "thread", + "attrs": { + "topic": "uplink_route" + } + }, + { + "node_id": "thr_ember_tide_watch", + "node_type": "thread", + "attrs": { + "topic": "ember_tide" + } + }, + { + "node_id": "thr_ghost_signal_net", + "node_type": "thread", + "attrs": { + "topic": "ghost_signal" + } + }, + { + "node_id": "post_shift_roster", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_midnight_manifest", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_sat_phone_ping", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_drone_parts", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_relay_schedule", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": 
"post_quay_ledgers", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_customs_tag", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_hull_signal", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_basin_photo", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_foundry_map", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_lantern_route", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + }, + { + "node_id": "post_uplink_note", + "node_type": "post", + "attrs": { + "channel": "microblog" + } + } + ], + "seeded_edges": [ + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": 
"alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_elin", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_elin", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "works_at", + "dst": "org_tidewatch_ops", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_gita", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_gita", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "located_in", + "dst": 
"loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "located_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "located_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "located_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "user_priya", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_priya", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "located_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + 
"src": "user_rhea", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "org_helios_labs", + "rel": "operates_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "org_northbridge_logistics", + "rel": "operates_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "org_apex_dynamics", + "rel": "operates_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "org_blueharbor_media", + "rel": "operates_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "org_tidewatch_ops", + "rel": "operates_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "org_kestrel_works", + "rel": "operates_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "org_atlas_freight", + "rel": "operates_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "org_sunmesh_analytics", + "rel": "operates_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "org_orion_customs", + "rel": "operates_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "org_emberline_security", + "rel": "operates_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "org_harborlight_transit", + "rel": "operates_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.95 + }, + { + "src": "user_bharat", + "rel": "connected_to", + "dst": "user_hiro", + "confidence": 0.95 + }, + { + "src": "user_hiro", + "rel": "connected_to", + 
"dst": "user_faris", + "confidence": 0.92 + }, + { + "src": "user_faris", + "rel": "connected_to", + "dst": "user_diya", + "confidence": 0.9 + }, + { + "src": "user_diya", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.89 + }, + { + "src": "user_elin", + "rel": "connected_to", + "dst": "user_aria", + "confidence": 0.87 + }, + { + "src": "user_aria", + "rel": "connected_to", + "dst": "user_cyrus", + "confidence": 0.84 + }, + { + "src": "user_cyrus", + "rel": "connected_to", + "dst": "user_gita", + "confidence": 0.83 + }, + { + "src": "user_gita", + "rel": "connected_to", + "dst": "user_jules", + "confidence": 0.82 + }, + { + "src": "user_jules", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.81 + }, + { + "src": "user_diya", + "rel": "connected_to", + "dst": "user_ivy", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.86 + }, + { + "src": "user_kian", + "rel": "connected_to", + "dst": "user_omar", + "confidence": 0.93 + }, + { + "src": "user_omar", + "rel": "connected_to", + "dst": "user_mika", + "confidence": 0.9 + }, + { + "src": "user_mika", + "rel": "connected_to", + "dst": "user_quinn", + "confidence": 0.89 + }, + { + "src": "user_quinn", + "rel": "connected_to", + "dst": "user_nora", + "confidence": 0.88 + }, + { + "src": "user_nora", + "rel": "connected_to", + "dst": "user_rhea", + "confidence": 0.87 + }, + { + "src": "user_rhea", + "rel": "connected_to", + "dst": "user_soren", + "confidence": 0.86 + }, + { + "src": "user_soren", + "rel": "connected_to", + "dst": "user_tara", + "confidence": 0.86 + }, + { + "src": "user_tara", + "rel": "connected_to", + "dst": "user_kian", + "confidence": 0.84 + }, + { + "src": "user_priya", + "rel": "connected_to", + "dst": "user_leena", + "confidence": 0.91 + }, + { + "src": "user_leena", + "rel": "connected_to", + "dst": "user_aria", + "confidence": 0.83 + }, + { + "src": "user_priya", + "rel": "connected_to", + "dst": 
"user_nora", + "confidence": 0.82 + }, + { + "src": "user_kian", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.8 + }, + { + "src": "user_soren", + "rel": "connected_to", + "dst": "user_faris", + "confidence": 0.79 + }, + { + "src": "user_quinn", + "rel": "connected_to", + "dst": "user_hiro", + "confidence": 0.78 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "authored_post", + "dst": "post_shift_roster", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "authored_post", + "dst": "post_sat_phone_ping", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "authored_post", + "dst": "post_drone_parts", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "authored_post", + "dst": "post_relay_schedule", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "authored_post", + "dst": "post_customs_tag", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "authored_post", + "dst": "post_hull_signal", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "authored_post", + "dst": "post_basin_photo", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "authored_post", + "dst": "post_foundry_map", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "authored_post", + "dst": "post_lantern_route", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "authored_post", + "dst": "post_uplink_note", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "post_shift_roster", + "rel": 
"references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_shift_roster", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "post_sat_phone_ping", + "rel": "references", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "post_sat_phone_ping", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "post_drone_parts", + "rel": "references", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "post_drone_parts", + "rel": "references", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "post_relay_schedule", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "post_relay_schedule", + "rel": "references", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "event_glass_harbor", + "confidence": 1.0 + }, + { + "src": "post_customs_tag", + "rel": "references", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "post_customs_tag", + "rel": "references", + "dst": "event_iron_wharf", + "confidence": 1.0 + }, + { + "src": "post_hull_signal", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "post_hull_signal", + "rel": "references", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "event_amber_veil", + "confidence": 1.0 + }, + { + "src": "post_foundry_map", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "post_foundry_map", + "rel": "references", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "post_lantern_route", + 
"rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "post_lantern_route", + "rel": "references", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "post_uplink_note", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "post_uplink_note", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "authored_thread", + "dst": "thr_port_audit", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "authored_thread", + "dst": "thr_customs_breach", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "authored_thread", + "dst": "thr_relay_map", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "authored_thread", + "dst": "thr_foundry_watch", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "authored_thread", + "dst": "thr_basin_shift", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "authored_thread", + "dst": "thr_quiet_manifest", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "authored_thread", + "dst": "thr_uplink_route", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "authored_thread", + "dst": "thr_ember_tide_watch", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "authored_thread", + "dst": "thr_ghost_signal_net", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "thr_port_audit", + "rel": "discusses", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "thr_port_audit", + "rel": "references", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "thr_customs_breach", + 
"rel": "discusses", + "dst": "event_iron_wharf", + "confidence": 1.0 + }, + { + "src": "thr_customs_breach", + "rel": "references", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "thr_relay_map", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_relay_map", + "rel": "references", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "thr_foundry_watch", + "rel": "discusses", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "thr_foundry_watch", + "rel": "references", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "thr_basin_shift", + "rel": "discusses", + "dst": "event_amber_veil", + "confidence": 1.0 + }, + { + "src": "thr_basin_shift", + "rel": "references", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "thr_quiet_manifest", + "rel": "discusses", + "dst": "event_glass_harbor", + "confidence": 1.0 + }, + { + "src": "thr_quiet_manifest", + "rel": "references", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "discusses", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "thr_ember_tide_watch", + "rel": "discusses", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "thr_ember_tide_watch", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "thr_ghost_signal_net", + "rel": "discusses", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "thr_ghost_signal_net", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_hiro", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + 
"src": "user_faris", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_diya", + "rel": "investigates", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_leena", + "rel": "monitors", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_cyrus", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_elin", + "rel": "investigates", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_jules", + "rel": "reports_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_kian", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_omar", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_priya", + "rel": "monitors", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_mika", + "rel": "collaborates_on", + "dst": "event_iron_wharf", + "confidence": 0.9 + }, + { + "src": "user_quinn", + "rel": "collaborates_on", + "dst": "event_iron_wharf", + "confidence": 0.9 + }, + { + "src": "user_nora", + "rel": "investigates", + "dst": "event_amber_veil", + "confidence": 0.9 + }, + { + "src": "user_rhea", + "rel": "collaborates_on", + "dst": "event_ember_tide", + "confidence": 0.9 + }, + { + "src": "user_soren", + "rel": "collaborates_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "user_tara", + "rel": "reports_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "user_gita", + "rel": "monitors", + "dst": "event_silent_current", + "confidence": 0.9 + }, + { + "src": "user_jules", + "rel": "reports_on", + "dst": "event_silent_current", + "confidence": 0.9 + }, + { + "src": "event_project_lantern", + "rel": "connected_to", + 
"dst": "event_glass_harbor", + "confidence": 0.77 + }, + { + "src": "event_black_kite", + "rel": "connected_to", + "dst": "event_amber_veil", + "confidence": 0.77 + }, + { + "src": "event_ember_tide", + "rel": "connected_to", + "dst": "event_ghost_signal", + "confidence": 0.77 + }, + { + "src": "org_atlas_freight", + "rel": "connected_to", + "dst": "org_northbridge_logistics", + "confidence": 0.77 + }, + { + "src": "org_orion_customs", + "rel": "connected_to", + "dst": "org_emberline_security", + "confidence": 0.77 + }, + { + "src": "org_harborlight_transit", + "rel": "connected_to", + "dst": "org_tidewatch_ops", + "confidence": 0.77 + } + ], + "seeded_questions": [ + { + "task_type": "fixed_trace", + "question": "alias_orchidfox -> post_midnight_manifest -> loc_dockyard17 -> connected collaborator on event_project_lantern. Who is it?", + "answer": "user_bharat", + "supporting_edges": [ + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.95 + }, + { + "src": "user_bharat", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_01", + "support_nodes": 6 + } + }, + { + "task_type": "fixed_trace", + "question": "thr_supply_leak references org_northbridge_logistics. 
Which alias_docksparrow user works there and collaborates on event_project_lantern?", + "answer": "user_hiro", + "supporting_edges": [ + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_02", + "support_nodes": 5 + } + }, + { + "task_type": "fixed_trace", + "question": "alias_monsoonbyte authored post_drone_parts about event_black_kite. Which user behind that alias is directly connected to the Kestrel collaborator?", + "answer": "user_diya", + "supporting_edges": [ + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "authored_post", + "dst": "post_drone_parts", + "confidence": 1.0 + }, + { + "src": "post_drone_parts", + "rel": "references", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.86 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_03", + "support_nodes": 7 + } + }, + { + "task_type": "fixed_trace", + "question": "alias_nightrelay references loc_rivergate. 
Which user behind it works at an org operating there and collaborates on event_project_lantern?", + "answer": "user_faris", + "supporting_edges": [ + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "authored_post", + "dst": "post_sat_phone_ping", + "confidence": 1.0 + }, + { + "src": "post_sat_phone_ping", + "rel": "references", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "works_at", + "dst": "org_tidewatch_ops", + "confidence": 1.0 + }, + { + "src": "org_tidewatch_ops", + "rel": "operates_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_04", + "support_nodes": 6 + } + }, + { + "task_type": "fixed_trace", + "question": "thr_port_audit discusses Black Kite and references Kestrel Works. 
Which alias_orchidfox user authored post_midnight_manifest and collaborates on Black Kite?", + "answer": "user_ivy", + "supporting_edges": [ + { + "src": "thr_port_audit", + "rel": "discusses", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "thr_port_audit", + "rel": "references", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_05", + "support_nodes": 6 + } + }, + { + "task_type": "fixed_trace", + "question": "Which Atlas Freight user behind alias_lanternmoth authored post_quay_ledgers and collaborates on event_glass_harbor?", + "answer": "user_kian", + "supporting_edges": [ + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_06", + "support_nodes": 5 + } + }, + { + "task_type": "fixed_trace", + "question": "Which Orion Customs user behind alias_basinraven authored post_customs_tag and collaborates on event_iron_wharf?", + "answer": "user_mika", + "supporting_edges": [ + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { 
+ "src": "alias_basinraven", + "rel": "authored_post", + "dst": "post_customs_tag", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "collaborates_on", + "dst": "event_iron_wharf", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_07", + "support_nodes": 5 + } + }, + { + "task_type": "fixed_trace", + "question": "Which user behind alias_emberglass posted basin_photo from Foundry Row and investigates Amber Veil?", + "answer": "user_nora", + "supporting_edges": [ + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "authored_post", + "dst": "post_basin_photo", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "investigates", + "dst": "event_amber_veil", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_08", + "support_nodes": 5 + } + }, + { + "task_type": "fixed_trace", + "question": "Which user behind alias_tideshard authored post_hull_signal and collaborates on Ghost Signal?", + "answer": "user_soren", + "supporting_edges": [ + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "authored_post", + "dst": "post_hull_signal", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "collaborates_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_09", + "support_nodes": 4 + } + }, + { + "task_type": "fixed_trace", + "question": "Which Harborlight Transit user behind alias_sablekeel authored post_uplink_note and reports on Ghost 
Signal?", + "answer": "user_tara", + "supporting_edges": [ + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "authored_post", + "dst": "post_uplink_note", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "reports_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + } + ], + "metadata": { + "difficulty": "easy", + "difficulty_level": 1, + "question_id": "easy_10", + "support_nodes": 5 + } + }, + { + "task_type": "fixed_trace", + "question": "Follow alias_docksparrow through post_shift_roster, Dockyard 17, and the Lantern chain. Return the org node id.", + "answer": "org_northbridge_logistics", + "supporting_edges": [ + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "authored_post", + "dst": "post_shift_roster", + "confidence": 1.0 + }, + { + "src": "post_shift_roster", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_shift_roster", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_bharat", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_bharat", + "rel": "connected_to", + "dst": "user_hiro", + "confidence": 0.95 + }, + { + "src": "user_hiro", + "rel": "connected_to", + 
"dst": "user_faris", + "confidence": 0.92 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "event_glass_harbor", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_01", + "support_nodes": 17 + } + }, + { + "task_type": "fixed_trace", + "question": "Across the Glass Harbor cluster, which user behind alias_lanternmoth links to the Atlas Freight network from thr_quiet_manifest?", + "answer": "user_kian", + "supporting_edges": [ + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "event_glass_harbor", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "authored_thread", + "dst": "thr_quiet_manifest", + "confidence": 1.0 + }, + { + "src": "thr_quiet_manifest", + "rel": "discusses", + "dst": "event_glass_harbor", + "confidence": 1.0 + }, + { + "src": "thr_quiet_manifest", + "rel": 
"references", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_omar", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_priya", + "rel": "monitors", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_kian", + "rel": "connected_to", + "dst": "user_omar", + "confidence": 0.93 + }, + { + "src": "user_omar", + "rel": "connected_to", + "dst": "user_mika", + "confidence": 0.9 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "authored_post", + "dst": "post_customs_tag", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_02", + "support_nodes": 17 + } + }, + { + "task_type": "fixed_trace", + "question": "Trace alias_basinraven through post_customs_tag, thr_customs_breach, and the Orion Customs collaboration chain. 
Who is it?", + "answer": "user_mika", + "supporting_edges": [ + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "authored_post", + "dst": "post_customs_tag", + "confidence": 1.0 + }, + { + "src": "post_customs_tag", + "rel": "references", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "post_customs_tag", + "rel": "references", + "dst": "event_iron_wharf", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "authored_thread", + "dst": "thr_customs_breach", + "confidence": 1.0 + }, + { + "src": "thr_customs_breach", + "rel": "discusses", + "dst": "event_iron_wharf", + "confidence": 1.0 + }, + { + "src": "thr_customs_breach", + "rel": "references", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "collaborates_on", + "dst": "event_iron_wharf", + "confidence": 0.9 + }, + { + "src": "user_quinn", + "rel": "collaborates_on", + "dst": "event_iron_wharf", + "confidence": 0.9 + }, + { + "src": "user_mika", + "rel": "connected_to", + "dst": "user_quinn", + "confidence": 0.89 + }, + { + "src": "user_quinn", + "rel": "connected_to", + "dst": "user_nora", + "confidence": 0.88 + }, + { + "src": "org_orion_customs", + "rel": "connected_to", + "dst": "org_emberline_security", + "confidence": 0.77 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + 
"confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_03", + "support_nodes": 17 + } + }, + { + "task_type": "fixed_trace", + "question": "In the Ember Tide and Amber Veil overlap, which Foundry Row user behind alias_cinderveil collaborates on Ember Tide?", + "answer": "user_rhea", + "supporting_edges": [ + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "authored_post", + "dst": "post_foundry_map", + "confidence": 1.0 + }, + { + "src": "post_foundry_map", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "post_foundry_map", + "rel": "references", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "authored_thread", + "dst": "thr_foundry_watch", + "confidence": 1.0 + }, + { + "src": "thr_foundry_watch", + "rel": "discusses", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "thr_foundry_watch", + "rel": "references", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "authored_thread", + "dst": "thr_ember_tide_watch", + "confidence": 1.0 + }, + { + "src": "thr_ember_tide_watch", + "rel": "discusses", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "thr_ember_tide_watch", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "investigates", + "dst": "event_amber_veil", + "confidence": 0.9 + }, + { + "src": "user_rhea", + "rel": "collaborates_on", + "dst": "event_ember_tide", + "confidence": 0.9 + 
}, + { + "src": "user_nora", + "rel": "connected_to", + "dst": "user_rhea", + "confidence": 0.87 + }, + { + "src": "event_ember_tide", + "rel": "connected_to", + "dst": "event_ghost_signal", + "confidence": 0.77 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_04", + "support_nodes": 18 + } + }, + { + "task_type": "fixed_trace", + "question": "Follow alias_tideshard from post_hull_signal into thr_uplink_route and the Harborlight relay. 
Return the org node id.", + "answer": "org_harborlight_transit", + "supporting_edges": [ + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "authored_post", + "dst": "post_hull_signal", + "confidence": 1.0 + }, + { + "src": "post_hull_signal", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "post_hull_signal", + "rel": "references", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "authored_thread", + "dst": "thr_uplink_route", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "discusses", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "collaborates_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "user_tara", + "rel": "reports_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "user_rhea", + "rel": "connected_to", + "dst": "user_soren", + "confidence": 0.86 + }, + { + "src": "user_soren", + "rel": "connected_to", + "dst": "user_tara", + "confidence": 0.86 + }, + { + "src": "org_harborlight_transit", + "rel": "operates_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "org_harborlight_transit", + "rel": "connected_to", + "dst": "org_tidewatch_ops", + "confidence": 0.77 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": 
"references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_05", + "support_nodes": 17 + } + }, + { + "task_type": "fixed_trace", + "question": "Which Sunmesh user behind alias_frostledger connects post_lantern_route to thr_relay_map and the Sector 9 monitoring chain?", + "answer": "user_leena", + "supporting_edges": [ + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "authored_post", + "dst": "post_lantern_route", + "confidence": 1.0 + }, + { + "src": "post_lantern_route", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "post_lantern_route", + "rel": "references", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "authored_thread", + "dst": "thr_relay_map", + "confidence": 1.0 + }, + { + "src": "thr_relay_map", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_relay_map", + "rel": "references", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_priya", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "org_sunmesh_analytics", + "rel": "operates_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "monitors", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_priya", + "rel": 
"connected_to", + "dst": "user_leena", + "confidence": 0.91 + }, + { + "src": "user_leena", + "rel": "connected_to", + "dst": "user_aria", + "confidence": 0.83 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "loc_east_quay", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_06", + "support_nodes": 17 + } + }, + { + "task_type": "fixed_trace", + "question": "Which user behind alias_emberglass is tied to Amber Veil after combining post_basin_photo, thr_basin_shift, and the Foundry Row investigation chain?", + "answer": "user_nora", + "supporting_edges": [ + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "authored_post", + "dst": "post_basin_photo", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "event_amber_veil", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "authored_thread", + "dst": "thr_basin_shift", + "confidence": 1.0 + }, + { + "src": "thr_basin_shift", + "rel": "discusses", + "dst": "event_amber_veil", + "confidence": 1.0 + }, + { + "src": "thr_basin_shift", + 
"rel": "references", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "investigates", + "dst": "event_amber_veil", + "confidence": 0.9 + }, + { + "src": "user_quinn", + "rel": "connected_to", + "dst": "user_nora", + "confidence": 0.88 + }, + { + "src": "user_nora", + "rel": "connected_to", + "dst": "user_rhea", + "confidence": 0.87 + }, + { + "src": "org_orion_customs", + "rel": "connected_to", + "dst": "org_emberline_security", + "confidence": 0.77 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_07", + "support_nodes": 18 + } + }, + { + "task_type": "fixed_trace", + "question": "Combine alias_orchidfox, post_midnight_manifest, thr_supply_leak, and the Lantern to Glass Harbor bridge. 
Which user starts that chain?", + "answer": "user_ivy", + "supporting_edges": [ + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.95 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.86 + }, + { + "src": "event_project_lantern", + "rel": "connected_to", + "dst": "event_glass_harbor", + "confidence": 0.77 + }, + { + "src": "user_kian", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_priya", + "rel": "monitors", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_08", + "support_nodes": 17 + } + }, + { + "task_type": "fixed_trace", + 
"question": "Which user behind alias_monsoonbyte sits at the overlap of Blueharbor Media, Project Lantern, Black Kite, and the Ivy connection chain?", + "answer": "user_diya", + "supporting_edges": [ + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "authored_post", + "dst": "post_drone_parts", + "confidence": 1.0 + }, + { + "src": "post_drone_parts", + "rel": "references", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "authored_thread", + "dst": "thr_port_audit", + "confidence": 1.0 + }, + { + "src": "thr_port_audit", + "rel": "discusses", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "investigates", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_jules", + "rel": "reports_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_faris", + "rel": "connected_to", + "dst": "user_diya", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.86 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 
1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_09", + "support_nodes": 18 + } + }, + { + "task_type": "fixed_trace", + "question": "Who is the Northbridge user behind alias_steelquill when combining post_relay_schedule, thr_supply_leak, Dockyard 17, and Lantern collaborator edges?", + "answer": "user_bharat", + "supporting_edges": [ + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "authored_post", + "dst": "post_relay_schedule", + "confidence": 1.0 + }, + { + "src": "post_relay_schedule", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "post_relay_schedule", + "rel": "references", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "collaborates_on", + "dst": 
"event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_hiro", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.95 + }, + { + "src": "user_bharat", + "rel": "connected_to", + "dst": "user_hiro", + "confidence": 0.95 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "loc_east_quay", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "mid", + "difficulty_level": 2, + "question_id": "mid_10", + "support_nodes": 17 + } + }, + { + "task_type": "fixed_trace", + "question": "Lantern to Glass Harbor handoff: identify the user behind alias_orchidfox after combining Lantern logistics, Dockyard links, and Atlas Freight bridge evidence.", + "answer": "user_ivy", + "supporting_edges": [ + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": 
"user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "located_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "located_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "org_northbridge_logistics", + "rel": "operates_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "org_kestrel_works", + "rel": "operates_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "org_atlas_freight", + "rel": "operates_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.95 + }, + { + "src": "user_bharat", + "rel": "connected_to", + "dst": "user_hiro", + "confidence": 0.95 + }, + { + "src": "user_hiro", + "rel": "connected_to", + "dst": "user_faris", + "confidence": 0.92 + }, + { + 
"src": "user_ivy", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.86 + }, + { + "src": "user_kian", + "rel": "connected_to", + "dst": "user_omar", + "confidence": 0.93 + }, + { + "src": "user_omar", + "rel": "connected_to", + "dst": "user_mika", + "confidence": 0.9 + }, + { + "src": "user_bharat", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_hiro", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_faris", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_kian", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_omar", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_priya", + "rel": "monitors", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_kian", + "rel": "authored_thread", + "dst": "thr_quiet_manifest", + "confidence": 1.0 + }, + { + "src": "thr_quiet_manifest", + "rel": "discusses", + "dst": "event_glass_harbor", + "confidence": 1.0 + }, + { + "src": "thr_quiet_manifest", + "rel": "references", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "event_glass_harbor", + "confidence": 1.0 + }, + { + "src": "event_project_lantern", + "rel": "connected_to", + "dst": "event_glass_harbor", + "confidence": 0.77 + }, + { + "src": "org_atlas_freight", + "rel": "connected_to", + "dst": "org_northbridge_logistics", + "confidence": 0.77 + }, + { + "src": 
"alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + 
"question_id": "high_01", + "support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "North Basin to Foundry Row escalation: which user behind alias_basinraven anchors the Iron Wharf side before the Emberline handoff?", + "answer": "user_mika", + "supporting_edges": [ + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "authored_post", + "dst": "post_customs_tag", + "confidence": 1.0 + }, + { + "src": "post_customs_tag", + "rel": "references", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "post_customs_tag", + "rel": "references", + "dst": "event_iron_wharf", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "authored_thread", + "dst": "thr_customs_breach", + "confidence": 1.0 + }, + { + "src": "thr_customs_breach", + "rel": "discusses", + "dst": "event_iron_wharf", + "confidence": 1.0 + }, + { + "src": "thr_customs_breach", + "rel": "references", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "authored_thread", + "dst": "thr_basin_shift", + "confidence": 1.0 + }, + { + "src": "thr_basin_shift", + "rel": "discusses", + "dst": "event_amber_veil", + "confidence": 1.0 + }, + { + "src": "thr_basin_shift", + "rel": "references", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "authored_thread", + "dst": "thr_foundry_watch", + "confidence": 1.0 + }, + { + "src": "thr_foundry_watch", + "rel": "discusses", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "thr_foundry_watch", + "rel": "references", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "works_at", + "dst": 
"org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "located_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "located_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "org_orion_customs", + "rel": "operates_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "org_emberline_security", + "rel": "operates_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "connected_to", + "dst": "user_quinn", + "confidence": 0.89 + }, + { + "src": "user_quinn", + "rel": "connected_to", + "dst": "user_nora", + "confidence": 0.88 + }, + { + "src": "user_nora", + "rel": "connected_to", + "dst": "user_rhea", + "confidence": 0.87 + }, + { + "src": "user_mika", + "rel": "collaborates_on", + "dst": "event_iron_wharf", + "confidence": 0.9 + }, + { + "src": "user_quinn", + "rel": "collaborates_on", + "dst": "event_iron_wharf", + "confidence": 0.9 + }, + { + "src": "user_nora", + "rel": "investigates", + "dst": "event_amber_veil", + "confidence": 0.9 + }, + { + "src": "user_rhea", + "rel": "collaborates_on", + "dst": "event_ember_tide", + "confidence": 0.9 + }, + { + "src": "alias_emberglass", + "rel": "authored_post", + "dst": "post_basin_photo", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "event_amber_veil", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "authored_post", + "dst": "post_foundry_map", + "confidence": 1.0 + }, + { + "src": 
"post_foundry_map", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "post_foundry_map", + "rel": "references", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "event_black_kite", + "rel": "connected_to", + "dst": "event_amber_veil", + "confidence": 0.77 + }, + { + "src": "event_ember_tide", + "rel": "connected_to", + "dst": "event_ghost_signal", + "confidence": 0.77 + }, + { + "src": "org_orion_customs", + "rel": "connected_to", + "dst": "org_emberline_security", + "confidence": 0.77 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "user_priya", + "rel": "connected_to", + "dst": "user_nora", + "confidence": 0.82 + }, + { + "src": "user_kian", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.8 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": 
"alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_02", + "support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "Harborlight ghost-signal relay: identify the user behind alias_tideshard at the Harborlight / Tidewatch junction.", + "answer": "user_soren", + "supporting_edges": [ + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "authored_post", + "dst": "post_hull_signal", + "confidence": 1.0 + }, + { + "src": "post_hull_signal", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "post_hull_signal", + "rel": "references", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "authored_post", + "dst": "post_uplink_note", + "confidence": 1.0 + }, + { + "src": "post_uplink_note", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "post_uplink_note", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "authored_thread", + "dst": "thr_uplink_route", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "discusses", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "authored_thread", + 
"dst": "thr_ghost_signal_net", + "confidence": 1.0 + }, + { + "src": "thr_ghost_signal_net", + "rel": "discusses", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "thr_ghost_signal_net", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "works_at", + "dst": "org_tidewatch_ops", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "org_harborlight_transit", + "rel": "operates_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "org_tidewatch_ops", + "rel": "operates_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "connected_to", + "dst": "user_soren", + "confidence": 0.86 + }, + { + "src": "user_soren", + "rel": "connected_to", + "dst": "user_tara", + "confidence": 0.86 + }, + { + "src": "user_tara", + "rel": "connected_to", + "dst": "user_kian", + "confidence": 0.84 + }, + { + "src": "user_soren", + "rel": "connected_to", + "dst": "user_faris", + "confidence": 0.79 + }, + { + "src": "user_faris", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_soren", + "rel": "collaborates_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "user_tara", + "rel": "reports_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "alias_nightrelay", + "rel": "authored_post", + "dst": "post_sat_phone_ping", + "confidence": 1.0 + }, + { + "src": 
"post_sat_phone_ping", + "rel": "references", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "post_sat_phone_ping", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_hiro", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "event_ember_tide", + "rel": "connected_to", + "dst": "event_ghost_signal", + "confidence": 0.77 + }, + { + "src": "org_harborlight_transit", + "rel": "connected_to", + "dst": "org_tidewatch_ops", + "confidence": 0.77 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + 
"confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_03", + "support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "Blueharbor to Black Kite to Lantern overlap: which user is the Blueharbor origin behind alias_monsoonbyte?", + "answer": "user_diya", + "supporting_edges": [ + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "authored_post", + "dst": "post_drone_parts", + "confidence": 1.0 + }, + { + "src": "post_drone_parts", + "rel": "references", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "post_drone_parts", + "rel": "references", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "authored_thread", + "dst": "thr_port_audit", + "confidence": 1.0 + }, + { + "src": "thr_port_audit", + "rel": "discusses", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "thr_port_audit", + "rel": "references", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "discusses", + 
"dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "org_blueharbor_media", + "rel": "operates_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "org_kestrel_works", + "rel": "operates_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "org_apex_dynamics", + "rel": "operates_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "connected_to", + "dst": "user_diya", + "confidence": 0.9 + }, + { + "src": "user_cyrus", + "rel": "connected_to", + "dst": "user_gita", + "confidence": 0.83 + }, + { + "src": "user_gita", + "rel": "connected_to", + "dst": "user_jules", + "confidence": 0.82 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.86 + }, + { + "src": "user_diya", + "rel": "investigates", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_ivy", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_cyrus", + "rel": "collaborates_on", + "dst": 
"event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_elin", + "rel": "investigates", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_jules", + "rel": "reports_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "event_project_lantern", + "rel": "connected_to", + "dst": "event_glass_harbor", + "confidence": 0.77 + }, + { + "src": "event_black_kite", + "rel": "connected_to", + "dst": "event_amber_veil", + "confidence": 0.77 + }, + { + "src": "user_leena", + "rel": "authored_thread", + "dst": "thr_relay_map", + "confidence": 1.0 + }, + { + "src": "thr_relay_map", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "monitors", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": 
"alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_04", + "support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "Sector 9 to Dockyard 17 full relay: which user behind alias_steelquill links the Northbridge chain and the Sunmesh monitoring bridge?", + "answer": "user_bharat", + "supporting_edges": [ + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "authored_post", + "dst": "post_relay_schedule", + "confidence": 1.0 + }, + { + "src": "post_relay_schedule", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "post_relay_schedule", + "rel": "references", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "authored_post", + "dst": "post_lantern_route", + "confidence": 1.0 + }, + { + "src": "post_lantern_route", + "rel": "references", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "post_lantern_route", + "rel": "references", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": 
"authored_thread", + "dst": "thr_relay_map", + "confidence": 1.0 + }, + { + "src": "thr_relay_map", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_relay_map", + "rel": "references", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "discusses", + "dst": "event_project_lantern", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_priya", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_priya", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "org_northbridge_logistics", + "rel": "operates_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "org_sunmesh_analytics", + "rel": "operates_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "org_helios_labs", + "rel": "operates_in", + "dst": "loc_sector9", + "confidence": 1.0 + 
}, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.95 + }, + { + "src": "user_bharat", + "rel": "connected_to", + "dst": "user_hiro", + "confidence": 0.95 + }, + { + "src": "user_diya", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.89 + }, + { + "src": "user_elin", + "rel": "connected_to", + "dst": "user_aria", + "confidence": 0.87 + }, + { + "src": "user_aria", + "rel": "connected_to", + "dst": "user_cyrus", + "confidence": 0.84 + }, + { + "src": "user_priya", + "rel": "connected_to", + "dst": "user_leena", + "confidence": 0.91 + }, + { + "src": "user_leena", + "rel": "connected_to", + "dst": "user_aria", + "confidence": 0.83 + }, + { + "src": "user_bharat", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_hiro", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_leena", + "rel": "monitors", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "event_project_lantern", + "rel": "connected_to", + "dst": "event_glass_harbor", + "confidence": 0.77 + }, + { + "src": "org_atlas_freight", + "rel": "connected_to", + "dst": "org_northbridge_logistics", + "confidence": 0.77 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + 
"src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_elin", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_elin", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "works_at", + "dst": "org_tidewatch_ops", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_05", + "support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "Foundry Row, North Basin, and Uplink Yard spread: identify the user behind alias_emberglass before the Harborlight relay takes over.", + "answer": "user_nora", + "supporting_edges": [ + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": 
"alias_emberglass", + "rel": "authored_post", + "dst": "post_basin_photo", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "post_basin_photo", + "rel": "references", + "dst": "event_amber_veil", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "authored_post", + "dst": "post_foundry_map", + "confidence": 1.0 + }, + { + "src": "post_foundry_map", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "post_foundry_map", + "rel": "references", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "authored_post", + "dst": "post_uplink_note", + "confidence": 1.0 + }, + { + "src": "post_uplink_note", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "post_uplink_note", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "authored_thread", + "dst": "thr_foundry_watch", + "confidence": 1.0 + }, + { + "src": "thr_foundry_watch", + "rel": "discusses", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "thr_foundry_watch", + "rel": "references", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "authored_thread", + "dst": "thr_ember_tide_watch", + "confidence": 1.0 + }, + { + "src": "thr_ember_tide_watch", + "rel": "discusses", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "thr_ember_tide_watch", + "rel": "references", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "authored_thread", + "dst": "thr_uplink_route", + "confidence": 1.0 + }, + { + "src": 
"thr_uplink_route", + "rel": "discusses", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "org_emberline_security", + "rel": "operates_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "org_harborlight_transit", + "rel": "operates_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "connected_to", + "dst": "user_rhea", + "confidence": 0.87 + }, + { + "src": "user_rhea", + "rel": "connected_to", + "dst": "user_soren", + "confidence": 0.86 + }, + { + "src": "user_soren", + "rel": "connected_to", + "dst": "user_tara", + "confidence": 0.86 + }, + { + "src": "user_nora", + "rel": "investigates", + "dst": "event_amber_veil", + "confidence": 0.9 + }, + { + "src": "user_rhea", + "rel": "collaborates_on", + "dst": "event_ember_tide", + "confidence": 0.9 + }, + { + "src": "user_soren", + "rel": "collaborates_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "user_tara", + "rel": "reports_on", + "dst": "event_ghost_signal", + 
"confidence": 0.9 + }, + { + "src": "event_ember_tide", + "rel": "connected_to", + "dst": "event_ghost_signal", + "confidence": 0.77 + }, + { + "src": "org_harborlight_transit", + "rel": "connected_to", + "dst": "org_tidewatch_ops", + "confidence": 0.77 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_06", + 
"support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "Freight and customs bridge: which Atlas Freight user behind alias_lanternmoth connects Glass Harbor with the Northbridge chain?", + "answer": "user_kian", + "supporting_edges": [ + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "authored_post", + "dst": "post_quay_ledgers", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "post_quay_ledgers", + "rel": "references", + "dst": "event_glass_harbor", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "authored_thread", + "dst": "thr_quiet_manifest", + "confidence": 1.0 + }, + { + "src": "thr_quiet_manifest", + "rel": "discusses", + "dst": "event_glass_harbor", + "confidence": 1.0 + }, + { + "src": "thr_quiet_manifest", + "rel": "references", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "located_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "located_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "org_atlas_freight", + "rel": "operates_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": 
"org_northbridge_logistics", + "rel": "operates_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "connected_to", + "dst": "user_omar", + "confidence": 0.93 + }, + { + "src": "user_omar", + "rel": "connected_to", + "dst": "user_mika", + "confidence": 0.9 + }, + { + "src": "user_kian", + "rel": "connected_to", + "dst": "user_bharat", + "confidence": 0.8 + }, + { + "src": "user_bharat", + "rel": "connected_to", + "dst": "user_hiro", + "confidence": 0.95 + }, + { + "src": "user_kian", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_omar", + "rel": "collaborates_on", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_priya", + "rel": "monitors", + "dst": "event_glass_harbor", + "confidence": 0.9 + }, + { + "src": "user_bharat", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "user_hiro", + "rel": "collaborates_on", + "dst": "event_project_lantern", + "confidence": 0.9 + }, + { + "src": "alias_docksparrow", + "rel": "authored_post", + "dst": "post_shift_roster", + "confidence": 1.0 + }, + { + "src": "post_shift_roster", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "post_shift_roster", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "authored_post", + "dst": "post_midnight_manifest", + "confidence": 1.0 + }, + { + "src": "post_midnight_manifest", + "rel": "references", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "authored_thread", + "dst": "thr_supply_leak", + "confidence": 1.0 + }, + { + "src": "thr_supply_leak", + "rel": "references", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "org_atlas_freight", + "rel": "connected_to", + "dst": "org_northbridge_logistics", + "confidence": 0.77 + }, + { + "src": 
"alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "works_at", + "dst": "org_blueharbor_media", + 
"confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_elin", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_elin", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "works_at", + "dst": "org_tidewatch_ops", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_07", + "support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "Black Kite, Amber Veil, and Iron Wharf overlap: which user behind alias_quartzlotus is the Apex-side collaborator?", + "answer": "user_cyrus", + "supporting_edges": [ + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "org_apex_dynamics", + "rel": "operates_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "collaborates_on", + "dst": "event_black_kite", + "confidence": 0.9 + }, + { + "src": "user_jules", + "rel": "authored_thread", + "dst": "thr_port_audit", + "confidence": 1.0 + }, + { + "src": "thr_port_audit", + "rel": "discusses", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "authored_post", + "dst": "post_drone_parts", + "confidence": 1.0 + }, + { + "src": "post_drone_parts", + "rel": "references", + "dst": "event_black_kite", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "investigates", + "dst": "event_amber_veil", + "confidence": 0.9 + }, + { + "src": "user_quinn", + "rel": "authored_thread", + "dst": "thr_basin_shift", + "confidence": 1.0 + }, + { + "src": "thr_basin_shift", + "rel": "discusses", + 
"dst": "event_amber_veil", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "collaborates_on", + "dst": "event_iron_wharf", + "confidence": 0.9 + }, + { + "src": "user_mika", + "rel": "authored_thread", + "dst": "thr_customs_breach", + "confidence": 1.0 + }, + { + "src": "thr_customs_breach", + "rel": "discusses", + "dst": "event_iron_wharf", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "located_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "located_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "org_kestrel_works", + "rel": "operates_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "org_emberline_security", + "rel": "operates_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "org_orion_customs", + "rel": "operates_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "connected_to", + "dst": "user_gita", + "confidence": 0.83 + }, + { + "src": "user_ivy", + "rel": "connected_to", + "dst": "user_elin", + "confidence": 0.86 + }, + { + "src": "user_mika", + "rel": "connected_to", + "dst": "user_quinn", + "confidence": 0.89 + }, + { + "src": "user_quinn", + "rel": "connected_to", + "dst": "user_nora", + "confidence": 0.88 + }, + { + "src": "user_nora", + "rel": "connected_to", + "dst": 
"user_rhea", + "confidence": 0.87 + }, + { + "src": "event_black_kite", + "rel": "connected_to", + "dst": "event_amber_veil", + "confidence": 0.77 + }, + { + "src": "org_orion_customs", + "rel": "connected_to", + "dst": "org_emberline_security", + "confidence": 0.77 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_08", + "support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "Ghost Signal and Ember 
Tide relay: which user behind alias_sablekeel is the Harborlight reporting endpoint?", + "answer": "user_tara", + "supporting_edges": [ + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "authored_post", + "dst": "post_uplink_note", + "confidence": 1.0 + }, + { + "src": "post_uplink_note", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "post_uplink_note", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "authored_post", + "dst": "post_hull_signal", + "confidence": 1.0 + }, + { + "src": "post_hull_signal", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "post_hull_signal", + "rel": "references", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "authored_thread", + "dst": "thr_ghost_signal_net", + "confidence": 1.0 + }, + { + "src": "thr_ghost_signal_net", + "rel": "discusses", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "thr_ghost_signal_net", + "rel": "references", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "authored_thread", + "dst": "thr_uplink_route", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "discusses", + "dst": "event_ghost_signal", + "confidence": 1.0 + }, + { + "src": "thr_uplink_route", + "rel": "references", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "authored_thread", + "dst": "thr_ember_tide_watch", + "confidence": 1.0 + }, + { + "src": "thr_ember_tide_watch", + "rel": "discusses", + "dst": "event_ember_tide", + "confidence": 1.0 + }, + { + "src": "thr_ember_tide_watch", + "rel": "references", + "dst": "loc_foundry_row", + 
"confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "org_harborlight_transit", + "rel": "operates_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "org_emberline_security", + "rel": "operates_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "connected_to", + "dst": "user_soren", + "confidence": 0.86 + }, + { + "src": "user_soren", + "rel": "connected_to", + "dst": "user_tara", + "confidence": 0.86 + }, + { + "src": "user_nora", + "rel": "connected_to", + "dst": "user_rhea", + "confidence": 0.87 + }, + { + "src": "user_rhea", + "rel": "collaborates_on", + "dst": "event_ember_tide", + "confidence": 0.9 + }, + { + "src": "user_soren", + "rel": "collaborates_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "user_tara", + "rel": "reports_on", + "dst": "event_ghost_signal", + "confidence": 0.9 + }, + { + "src": "event_ember_tide", + "rel": "connected_to", + "dst": "event_ghost_signal", + "confidence": 0.77 + }, + { + "src": "org_harborlight_transit", + "rel": "connected_to", + "dst": "org_tidewatch_ops", + "confidence": 0.77 + }, + { + "src": "alias_cinderveil", + "rel": 
"alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_09", + 
"support_nodes": 50 + } + }, + { + "task_type": "fixed_trace", + "question": "End-to-end benchmark sweep: across Lantern, Black Kite, Glass Harbor, Iron Wharf, Ember Tide, and Ghost Signal, which user behind alias_hollowsignal anchors the Sunmesh monitoring side?", + "answer": "user_priya", + "supporting_edges": [ + { + "src": "alias_orchidfox", + "rel": "alias_of", + "dst": "user_ivy", + "confidence": 1.0 + }, + { + "src": "alias_steelquill", + "rel": "alias_of", + "dst": "user_bharat", + "confidence": 1.0 + }, + { + "src": "alias_monsoonbyte", + "rel": "alias_of", + "dst": "user_diya", + "confidence": 1.0 + }, + { + "src": "alias_nightrelay", + "rel": "alias_of", + "dst": "user_faris", + "confidence": 1.0 + }, + { + "src": "alias_mapleghost", + "rel": "alias_of", + "dst": "user_elin", + "confidence": 1.0 + }, + { + "src": "alias_docksparrow", + "rel": "alias_of", + "dst": "user_hiro", + "confidence": 1.0 + }, + { + "src": "alias_quartzlotus", + "rel": "alias_of", + "dst": "user_cyrus", + "confidence": 1.0 + }, + { + "src": "alias_emberglass", + "rel": "alias_of", + "dst": "user_nora", + "confidence": 1.0 + }, + { + "src": "alias_basinraven", + "rel": "alias_of", + "dst": "user_mika", + "confidence": 1.0 + }, + { + "src": "alias_tideshard", + "rel": "alias_of", + "dst": "user_soren", + "confidence": 1.0 + }, + { + "src": "alias_hollowsignal", + "rel": "alias_of", + "dst": "user_priya", + "confidence": 1.0 + }, + { + "src": "alias_ironwhisper", + "rel": "alias_of", + "dst": "user_omar", + "confidence": 1.0 + }, + { + "src": "alias_cinderveil", + "rel": "alias_of", + "dst": "user_rhea", + "confidence": 1.0 + }, + { + "src": "alias_sablekeel", + "rel": "alias_of", + "dst": "user_tara", + "confidence": 1.0 + }, + { + "src": "alias_lanternmoth", + "rel": "alias_of", + "dst": "user_kian", + "confidence": 1.0 + }, + { + "src": "alias_frostledger", + "rel": "alias_of", + "dst": "user_leena", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "works_at", + "dst": 
"org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_aria", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_bharat", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_cyrus", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_diya", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_elin", + "rel": "works_at", + "dst": "org_helios_labs", + "confidence": 1.0 + }, + { + "src": "user_elin", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "works_at", + "dst": "org_tidewatch_ops", + "confidence": 1.0 + }, + { + "src": "user_faris", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_gita", + "rel": "works_at", + "dst": "org_apex_dynamics", + "confidence": 1.0 + }, + { + "src": "user_gita", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "works_at", + "dst": "org_northbridge_logistics", + "confidence": 1.0 + }, + { + "src": "user_hiro", + "rel": "located_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "works_at", + "dst": "org_kestrel_works", + "confidence": 1.0 + }, + { + "src": "user_ivy", + "rel": "located_in", + "dst": "loc_rivergate", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "works_at", + "dst": "org_blueharbor_media", + "confidence": 1.0 + }, + { + "src": "user_jules", + "rel": "located_in", + "dst": "loc_old_town", + "confidence": 1.0 + }, + { + "src": 
"user_kian", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_kian", + "rel": "located_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_leena", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_mika", + "rel": "located_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_nora", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "works_at", + "dst": "org_atlas_freight", + "confidence": 1.0 + }, + { + "src": "user_omar", + "rel": "located_in", + "dst": "loc_east_quay", + "confidence": 1.0 + }, + { + "src": "user_priya", + "rel": "works_at", + "dst": "org_sunmesh_analytics", + "confidence": 1.0 + }, + { + "src": "user_priya", + "rel": "located_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "works_at", + "dst": "org_orion_customs", + "confidence": 1.0 + }, + { + "src": "user_quinn", + "rel": "located_in", + "dst": "loc_north_basin", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "works_at", + "dst": "org_emberline_security", + "confidence": 1.0 + }, + { + "src": "user_rhea", + "rel": "located_in", + "dst": "loc_foundry_row", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_soren", + "rel": "located_in", + "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "works_at", + "dst": "org_harborlight_transit", + "confidence": 1.0 + }, + { + "src": "user_tara", + "rel": "located_in", 
+ "dst": "loc_uplink_yard", + "confidence": 1.0 + }, + { + "src": "org_helios_labs", + "rel": "operates_in", + "dst": "loc_sector9", + "confidence": 1.0 + }, + { + "src": "org_northbridge_logistics", + "rel": "operates_in", + "dst": "loc_dockyard17", + "confidence": 1.0 + } + ], + "metadata": { + "difficulty": "high", + "difficulty_level": 3, + "question_id": "high_10", + "support_nodes": 55 + } + } + ], + "llm_generate_remaining_graph": true, + "llm_generate_remaining_tasks": false, + "llm_generated_edge_budget": 48, + "llm_generated_task_budget": 0, + "llm_generation_parallel": true, + "llm_generation_workers": 4, + "llm_generation_retries": 3, + "allow_template_fallback_on_llm_failure": false + } +} \ No newline at end of file diff --git a/datasets/fixed_levels/shared_config_fixed_levels.json b/datasets/fixed_levels/shared_config_fixed_levels.json new file mode 100644 index 0000000000000000000000000000000000000000..30a0a18011c0e32abb36b272644fd3257119c246 --- /dev/null +++ b/datasets/fixed_levels/shared_config_fixed_levels.json @@ -0,0 +1,63 @@ +{ + "environment": { + "n_users": 24, + "alias_density": 0.2, + "noise_level": 0.12, + "red_herring_rate": 0.08, + "max_steps": 24, + "seed": 2026 + }, + "dataset": { + "mode": "canonical", + "metaqa_root": "metaQA", + "metaqa_kb_path": "", + "metaqa_variant": "vanilla", + "metaqa_hops": ["1-hop", "2-hop", "3-hop"], + "metaqa_splits": ["train", "dev", "test"] + }, + "swarm": { + "enabled": true, + "max_agents": 3, + "max_breadth": 2, + "max_width": 2, + "max_depth": 2, + "planner_rounds": 2, + "tools_per_agent": 1 + }, + "spawn_reward": { + "lambda_parallel": 0.15, + "lambda_finish": 0.2, + "anneal": 1.0, + "max_parallel_hint": 3 + }, + "seeding": { + "seeded_nodes": [], + "seeded_edges": [], + "seeded_questions": [], + "llm_generate_remaining_graph": true, + "llm_generate_remaining_tasks": false, + "llm_generated_edge_budget": 64, + "llm_generated_task_budget": 0, + "llm_generation_parallel": true, + 
"llm_generation_workers": 4, + "llm_generation_retries": 3, + "allow_template_fallback_on_llm_failure": false + }, + "llm": { + "provider": "ollama", + "model": "qwen3:2b", + "temperature": 0.05, + "max_tokens": 384, + "timeout_seconds": 240, + "ollama_base_url": "http://127.0.0.1:11434", + "openai_base_url": "https://api.openai.com/v1", + "openai_api_key_env": "OPENAI_API_KEY", + "openai_api_key": "" + }, + "runtime": { + "default_episodes": 30, + "leaderboard_path": "datasets/fixed_levels/leaderboard_fixed_levels.json", + "dashboard_path": "datasets/fixed_levels/dashboard_fixed_levels.html", + "sweep_dashboard_dir": "datasets/fixed_levels/sweep_dashboards" + } +} diff --git a/docs/adversarial_self_play.md b/docs/adversarial_self_play.md new file mode 100644 index 0000000000000000000000000000000000000000..fe3a1d2ce4b57f6fdf6b2de04fd6c2e7cff697a6 --- /dev/null +++ b/docs/adversarial_self_play.md @@ -0,0 +1,99 @@ +# Adversarial Self-Play Training (Kimi-Style + TRL) + +This repository now includes a code scaffold for alternating adversarial self-play with Hugging Face TRL. + +## Goal + +Train two policies in alternating rounds: + +- Generator policy: proposes hard OSINT tasks (question + answer + supporting edges). +- Answerer policy: solves tasks proposed by the generator. + +The loop is intended to move from static evaluation toward on-policy co-evolution. + +## Kimi-style Objective Mapping + +The implementation maps the requested Kimi-style ingredients onto TRL GRPO as follows: + +- Grouped rollouts: `num_generations` in each GRPO phase. +- Relative reward baseline: GRPO group-relative advantages. +- Clipped policy updates: `epsilon` clipping in GRPO objective. +- KL/reference regularization: `beta` in GRPOConfig. +- Token-level online RL behavior: GRPO online generation with reward functions. +- Toggle schedule: explicit alternating generator and answerer rounds. 
+ +## Topology and Scheduling Options + +- `model_topology: "dual"`: train separate generator and answerer models. +- `model_topology: "shared"`: train one shared model for both roles. + - Use `shared_model_name_or_path` to set the common base checkpoint. +- `phase_schedule: "generator_answerer"`: default two-phase loop per round. +- `phase_schedule: "answerer_generator_answerer"`: solver-first curriculum: + 1. Train answerer on current adversarial pool. + 2. Freeze that answerer snapshot while training generator against it. + 3. Train answerer again on newly generated adversarial tasks. + +This directly supports the "train solver, freeze, attack, retrain solver" sequence. + +## Canonical Graph Mode + +- `canonical_graph_mode: "generate"` (default): generator can propose canonical graph updates in `swarm_v2`. +- `canonical_graph_mode: "fixed"`: canonical graph candidates are held fixed per prompt, so training focuses on question/answer behavior over stable graph structure. + +## Tuning Modes + +- `tuning_mode: "full"`: full-model GRPO fine-tuning. +- `tuning_mode: "lora"`: PEFT LoRA adapters for GRPO updates. + - Configure via `lora` block: `r`, `alpha`, `dropout`, `target_modules`, `bias`, `task_type`. + +## Reward Design + +### Generator (adversarial swarm) + +`GeneratorRewardFunction` combines weighted components: + +- Validity: checks parsable task fields and bounded support-edge size. +- Hardness: rewards questions the frozen answerer currently gets wrong. +- Diversity: penalizes near-duplicate questions via token-overlap similarity. +- Consistency: rewards edge/answer/question grounding against canonical graph context. + +Weights are configurable in `generator_reward_weights`. + +### Answerer (existing reward integration) + +`AnswererRewardFunction` wraps existing environment reward logic: + +- Reuses `compute_answer_reward` from `src/osint_env/env/reward.py`. +- Builds transient `TaskInstance` objects from training rows. 
+- Preserves difficulty-aware reward behavior (`easy` / `medium` / `hard`). + +## Entry Points + +- CLI command: `osint-env train-self-play` +- Main runner: `src/osint_env/training/self_play.py` +- Config loader: `src/osint_env/training/config.py` +- Reward functions: `src/osint_env/training/rewards.py` +- Example config: `config/self_play_training_example.json` + +## Dry Run Mode + +The example config sets `dry_run: true` by default. + +In dry run mode, the pipeline still: + +- Materializes generator/answerer datasets per round. +- Materializes optional `answerer_pre_dataset` when using solver-first schedule. +- Produces generated-task artifacts (fallback generator path). +- Writes a full run summary. + +But it skips expensive GRPO updates. + +## Compute Mode + +When compute is available: + +1. Install train dependencies: `python -m pip install -e ".[train]"` +2. Disable dry run (`--dry-run` off and/or `"dry_run": false` in config). +3. Run `osint-env train-self-play`. + +Outputs are written under `artifacts/self_play` unless overridden. diff --git a/docs/reward_design_notes.md b/docs/reward_design_notes.md new file mode 100644 index 0000000000000000000000000000000000000000..7041d248b1e22e864a0ca50dbfe3dc2fbc32f7f7 --- /dev/null +++ b/docs/reward_design_notes.md @@ -0,0 +1,94 @@ +# Reward Design Notes + +This environment uses a composite reward that adapts ideas from: + +- AutoGraph-R1 (arXiv:2510.15339) +- UniRel (arXiv:2512.17043) +- DeepPath (EMNLP 2017, D17-1060) +- Multi-Hop KG Reasoning with Reward Shaping (EMNLP 2018, D18-1362) +- Kimi K2.5 (arXiv:2602.02276) for PARL-style swarm auxiliary shaping + +Additional related context consulted: + +- MINERVA (arXiv:1711.05851) for query-conditioned walk-style reasoning over KG paths. + +## Components in this Branch + +The implementation follows a staged reward design: + +1. edge-level rewards during graph construction (`ADD_EDGE`) +2. 
answer-level rewards for retrieval usefulness and final task utility (`ANSWER`) + 3. evaluation-level composite leaderboard score for benchmark ranking + + ### 1) Edge addition reward + + For each `ADD_EDGE`, the reward combines: + + - Global accuracy term (DeepPath): + - $r_{global} = +1$ if a candidate edge is correct, else $-1$ (scaled in code for stability). + - Soft shaping term (D18 reward shaping): + - $R = R_b + (1 - R_b) f(s, r, o)$, where $f$ is a soft fact plausibility score. + - In code, $f$ is approximated by relation/type priors plus small domain priors. + - Efficiency term (DeepPath): + - $r_{efficiency} \propto 1 / \text{step\_count}$. + - Diversity term (DeepPath): + - novelty from cosine dissimilarity of edge signatures; repeated patterns are down-weighted. + - Relation/entity informativeness (UniRel): + - relation rarity via normalized IDF of relation labels, + - entity informativeness via inverse hub-penalty. + - Connectivity gain term: + - rewards bridge edges that connect previously disconnected graph regions. + + ### 2) Final answer reward + + For `ANSWER`, the reward combines: + + - format validity, + - answer correctness, + - knowledge-carrying utility (AutoGraph-R1 style): + - $R_C(q, y, G) = \mathbb{I}[\text{deducible}(q, y \mid G)]$. + - knowledge-indexing utility (AutoGraph-R1 style): + - $R_I(q, D_{gold}, G) = |Top\text{-}k(G,q) \cap D_{gold}| / |D_{gold}|$, + - approximated in this environment with evidence recall over tool outputs. + - connectivity (UniRel style): + - discrete connectivity reward over extracted seed entities, normalized for stable mixing. + - graph F1 against supporting edges, + - compactness penalty for unnecessary extra edges, + - efficiency bonus, + - relation/entity informativeness for the constructed subgraph, + - repetition penalty to discourage redundant relation generation patterns. 
+ + UniRel-style aggregate view represented in this branch: + + $$ + R(a) \approx R_{fmt} + R_{con} + w_1 R_{ent} + w_2 R_{rel} + \text{task utility terms} + $$ + + with task utility terms coming from AutoGraph-inspired $R_C$ and $R_I$ components. + + ## Telemetry + + Per-step component rewards are aggregated into `info["reward_components"]`, enabling: + + - richer benchmark summaries, + - leaderboard ranking by composite utility, + - visual diagnostics in dashboard exports. + + Evaluation also computes derived retrieval and structural utility signals used in leaderboard ranking. + + ## Future Multi-Agent Notes + + This branch now includes a low-width swarm baseline orchestrator that adds PARL-style auxiliary shaping on top of the core edge and answer rewards. + + The helper implementation is in: + + - `src/osint_env/env/spawn_reward_hooks.py` + + It follows the Kimi K2.5 style decomposition: + + - $r_{PARL}(x,y) = r_{perf}(x,y) + \lambda_1 r_{parallel} + \lambda_2 r_{finish}$, + - optional critical-steps shaping for latency-sensitive training, + - optional annealing of $\lambda_1, \lambda_2$ toward zero, + - optional breadth/depth shaping hooks for future branch integration. + + The expanded project-level walkthrough is in `README.md` under "Reward Design (Integrated Notes)". 
diff --git a/inference.py b/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..9e34370ac9101cfed4bc02df8fe8192a1ba78d22 --- /dev/null +++ b/inference.py @@ -0,0 +1,540 @@ +from __future__ import annotations + +import json +import os +from pathlib import Path +from typing import Any + +from osint_env.agents.single_agent import SingleAgentRunner +from osint_env.agents.swarm_agent import SwarmAgentRunner +from osint_env.config import clone_environment_config, load_seeding_config, load_shared_config +from osint_env.domain.models import EnvironmentConfig +from osint_env.env.environment import OSINTEnvironment +from osint_env.env.reward import compute_graph_f1 +from osint_env.eval.leaderboard import append_leaderboard_record, load_leaderboard +from osint_env.eval.metrics import EvalMetrics +from osint_env.llm import build_llm_client +from osint_env.viz import export_dashboard + + +CONFIG_PATH = os.getenv("CONFIG_PATH", "datasets/fixed_levels/shared_config_fixed_levels.json") +SEED_FILE = os.getenv("SEED_FILE", "datasets/fixed_levels/seed_fixed_levels.json") +AGENT_MODE = os.getenv("AGENT_MODE", "swarm") +LLM_PROVIDER = os.getenv("LLM_PROVIDER", "openai") +MODEL_NAME = os.getenv("MODEL_NAME", "gpt-5.4") +OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "") +OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "") +OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "") +OPENAI_API_KEY_ENV = os.getenv("OPENAI_API_KEY_ENV", "OPENAI_API_KEY") +API_BASE_URL = os.getenv("API_BASE_URL", "https://api.openai.com/v1") +API_KEY = os.getenv("API_KEY", "") +HF_SPACE_URL = os.getenv("HF_SPACE_URL", "") +HF_TOKEN = os.getenv("HF_TOKEN","") +LOCAL_IMAGE_NAME = os.getenv("LOCAL_IMAGE_NAME", "") +LLM_TIMEOUT_SECONDS = int(os.getenv("LLM_TIMEOUT_SECONDS", "0")) +EPISODES = int(os.getenv("EPISODES", "1")) +SUCCESS_SCORE_THRESHOLD = float(os.getenv("SUCCESS_SCORE_THRESHOLD", "0.67")) +TASK_INDICES_RAW = os.getenv("TASK_INDICES", "") +DATASET_MODE = os.getenv("DATASET_MODE", 
"") +METAQA_ROOT = os.getenv("METAQA_ROOT", "") +METAQA_KB_PATH = os.getenv("METAQA_KB_PATH", "") +METAQA_VARIANT = os.getenv("METAQA_VARIANT", "") +METAQA_HOPS_RAW = os.getenv("METAQA_HOPS", "") +METAQA_SPLITS_RAW = os.getenv("METAQA_SPLITS", "") + +WRITE_BENCHMARK_ARTIFACTS = os.getenv("WRITE_BENCHMARK_ARTIFACTS", "1").strip().lower() in { + "1", + "true", + "yes", + "y", + "on", +} +LEADERBOARD_PATH = os.getenv("LEADERBOARD_PATH", "datasets/fixed_levels/leaderboard_fixed_levels.json") +DASHBOARD_PATH = os.getenv("DASHBOARD_PATH", "datasets/fixed_levels/dashboard_fixed_levels.html") +RUN_NAME = os.getenv("RUN_NAME", "fixed_levels_qwen_swarm") + +BENCHMARK = "osint-openenv" +TASK_NAME = "fixed_levels_easy_mid_hard" + + +def _parse_task_indices(raw: str) -> list[int]: + out: list[int] = [] + for token in str(raw or "").split(","): + stripped = token.strip() + if not stripped: + continue + try: + out.append(int(stripped)) + except ValueError: + continue + return out + + +def _parse_csv_tokens(raw: str) -> list[str]: + return [token.strip() for token in str(raw or "").split(",") if token.strip()] + + +def _normalize_ollama_base_url(url: str) -> str: + normalized = str(url or "").strip().rstrip("/") + if normalized.endswith("/v1"): + normalized = normalized[:-3].rstrip("/") + return normalized or "http://127.0.0.1:11434" + + +def _normalize_openai_base_url(url: str) -> str: + normalized = str(url or "").strip().rstrip("/") + if not normalized: + return "" + if normalized.endswith("/v1"): + return normalized + return f"{normalized}/v1" + + +TASK_INDICES = _parse_task_indices(TASK_INDICES_RAW) + + +def log_start(task: str, env: str, model: str) -> None: + print(f"[START] task={task} env={env} model={model}", flush=True) + + +def log_step(step: int, action: str, reward: float, done: bool, error: str | None) -> None: + error_text = "null" if error is None else str(error) + print( + f"[STEP] step={step} action={action} reward={reward:.2f} done={str(bool(done)).lower()} 
error={error_text}", + flush=True, + ) + + +def log_end(task: str, success: bool, steps: int, score: float, rewards: list[float]) -> None: + rewards_text = ",".join(f"{value:.2f}" for value in rewards) + print( + f"[END] success={str(bool(success)).lower()} steps={steps} score={score:.2f} rewards={rewards_text}", + flush=True, + ) + + +def _looks_like_placeholder_api_key(value: str) -> bool: + token = str(value or "").strip().lower() + if not token: + return True + placeholder_markers = [ + "your_openai_api_key", + "your-key", + "your_key", + "your real", + "real-openai-key", + "replace-me", + "changeme", + "example", + "", + ] + if token.startswith("your_") or token.startswith("sk-your-"): + return True + return any(marker in token for marker in placeholder_markers) + + +def _format_action(action: dict[str, Any]) -> str: + action_type = str(action.get("action_type", "")).upper() + payload = dict(action.get("payload", {})) + + if action_type == "ANSWER": + return f"answer({str(payload.get('answer', 'unknown')).strip()})" + + if action_type == "ADD_EDGE": + try: + conf = float(payload.get("confidence", 1.0)) + except (TypeError, ValueError): + conf = 1.0 + return ( + "add_edge(" + f"{payload.get('src', '')}," + f"{payload.get('rel', '')}," + f"{payload.get('dst', '')}," + f"{conf:.2f}" + ")" + ) + + tool_name = str(payload.get("tool_name", "tool")).strip() or "tool" + args = payload.get("args", {}) + if not isinstance(args, dict) or not args: + return f"{tool_name}()" + args_text = ",".join(f"{key}={value}" for key, value in sorted(args.items())) + return f"{tool_name}({args_text})" + + +def _assistant_tool_call_id(message: dict[str, Any]) -> str | None: + tool_calls = list(message.get("tool_calls", [])) + if not tool_calls: + return None + tool_call_id = tool_calls[0].get("id") + return str(tool_call_id) if tool_call_id else None + + +def _tool_result_message(assistant_message: dict[str, Any], result: dict[str, Any]) -> dict[str, Any] | None: + tool_call_id = 
_assistant_tool_call_id(assistant_message) + if not tool_call_id: + return None + return { + "role": "tool", + "tool_call_id": tool_call_id, + "content": json.dumps(result, sort_keys=True), + } + + +def _resolve_environment_config() -> EnvironmentConfig: + shared = load_shared_config(CONFIG_PATH) + env_cfg = clone_environment_config(shared.environment) + + if SEED_FILE and Path(SEED_FILE).exists(): + env_cfg.seeding = load_seeding_config(SEED_FILE) + + mode = AGENT_MODE.strip().lower() + if mode == "single": + env_cfg.swarm.enabled = False + elif mode == "swarm": + env_cfg.swarm.enabled = True + + # Inference submissions must route all calls through OpenAI-compatible client config. + env_cfg.llm.provider = "openai" + env_cfg.llm.model = MODEL_NAME.strip() + + if LLM_TIMEOUT_SECONDS > 0: + env_cfg.llm.timeout_seconds = int(LLM_TIMEOUT_SECONDS) + + # Evaluation harnesses inject API_BASE_URL/HF_TOKEN for proxy-enforced requests. + resolved_openai_base = API_BASE_URL.strip() or OPENAI_BASE_URL.strip() or HF_SPACE_URL.strip() + if resolved_openai_base: + env_cfg.llm.openai_base_url = _normalize_openai_base_url(resolved_openai_base) + + if HF_TOKEN.strip(): + env_cfg.llm.openai_api_key = HF_TOKEN.strip() + elif API_KEY.strip(): + env_cfg.llm.openai_api_key = API_KEY.strip() + elif OPENAI_API_KEY.strip(): + env_cfg.llm.openai_api_key = OPENAI_API_KEY.strip() + + if OPENAI_API_KEY_ENV.strip(): + env_cfg.llm.openai_api_key_env = OPENAI_API_KEY_ENV.strip() + + dataset_mode = DATASET_MODE.strip().lower() + if dataset_mode in {"canonical", "metaqa"}: + env_cfg.dataset_mode = dataset_mode + + if METAQA_ROOT.strip(): + env_cfg.metaqa_root = METAQA_ROOT.strip() + if METAQA_KB_PATH.strip(): + env_cfg.metaqa_kb_path = METAQA_KB_PATH.strip() + + metaqa_variant = METAQA_VARIANT.strip().lower() + if metaqa_variant in {"vanilla", "ntm"}: + env_cfg.metaqa_variant = metaqa_variant + + metaqa_hops = _parse_csv_tokens(METAQA_HOPS_RAW) + if metaqa_hops: + env_cfg.metaqa_hops = metaqa_hops + 
+ metaqa_splits = _parse_csv_tokens(METAQA_SPLITS_RAW) + if metaqa_splits: + env_cfg.metaqa_splits = metaqa_splits + + return env_cfg + + +def _runner_for(env: OSINTEnvironment, llm: Any) -> SingleAgentRunner | SwarmAgentRunner: + if env.config.swarm.enabled: + return SwarmAgentRunner(env=env, llm=llm) + return SingleAgentRunner(env=env, llm=llm) + + +def _normalize_difficulty(value: str) -> str: + token = str(value or "").strip().lower() + if token in {"easy", "e"}: + return "easy" + if token in {"mid", "medium", "m"}: + return "medium" + if token in {"high", "hard", "h"}: + return "hard" + return "hard" + + +def _task_difficulty(env: OSINTEnvironment, task_index: int) -> str: + idx = int(task_index) % max(1, len(env.tasks)) + task = env.tasks[idx] + if isinstance(task.metadata, dict) and "difficulty" in task.metadata: + return _normalize_difficulty(str(task.metadata.get("difficulty", ""))) + if idx < 10: + return "easy" + if idx < 20: + return "medium" + return "hard" + + +def _episode_row(env: OSINTEnvironment, info: dict[str, Any]) -> dict[str, Any]: + if env.state is None: + return { + "task_id": "unknown", + "task_type": "unknown", + "question": "", + "task_answer": str(info.get("task_answer", "")), + "agent_answer": str(info.get("agent_answer", "")), + "graph_f1": 0.0, + "reward": float(info.get("total_reward", 0.0) or 0.0), + "steps": int(info.get("step_count", 0) or 0), + "tool_calls": int(info.get("tool_calls", 0) or 0), + "success": int(info.get("agent_answer") == info.get("task_answer")), + "reward_components": dict(info.get("reward_components", {})), + "pred_edges": [], + "truth_edges": [], + } + + graph_f1 = compute_graph_f1(env.memory_graph.edges, env.state.task.supporting_edges) + return { + "task_id": env.state.task.task_id, + "task_type": env.state.task.task_type, + "question": env.state.task.question, + "task_answer": str(info.get("task_answer", "")), + "agent_answer": str(info.get("agent_answer", "")) if info.get("agent_answer") is not None else 
"", + "graph_f1": graph_f1, + "reward": float(info.get("total_reward", 0.0) or 0.0), + "steps": int(info.get("step_count", 0) or 0), + "tool_calls": int(info.get("tool_calls", 0) or 0), + "success": int(info.get("agent_answer") == info.get("task_answer")), + "reward_components": dict(info.get("reward_components", {})), + "spawn_count": int(info.get("spawn_count", 0) or 0), + "spawn_critical_steps": int(info.get("spawn_critical_steps", 0) or 0), + "pred_edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in env.memory_graph.edges + ], + "truth_edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in env.state.task.supporting_edges + ], + } + + +def _last_action_error(observation: Any, info: dict[str, Any]) -> str | None: + raw = info.get("last_action_error") if isinstance(info, dict) else None + if raw is not None: + return str(raw) + + tool_outputs = getattr(observation, "tool_outputs", None) + if isinstance(tool_outputs, list) and tool_outputs: + last = tool_outputs[-1] + if isinstance(last, dict): + output = last.get("output") + if isinstance(output, dict) and output.get("error") is not None: + return str(output.get("error")) + return None + + +def _install_step_logger(env: OSINTEnvironment) -> tuple[list[float], dict[str, int], Any]: + rewards: list[float] = [] + counters = {"steps": 0} + original_step = env.step + + def _logged_step(action: Any): + observation, reward, done, info = original_step(action) + counters["steps"] += 1 + reward_value = float(reward or 0.0) + rewards.append(reward_value) + action_type = getattr(action, "action_type", "") + action_type_value = str(getattr(action_type, "value", action_type)) + action_text = _format_action( + { + "action_type": action_type_value, + "payload": dict(getattr(action, "payload", {}) or {}), + } + ) + log_step( + step=counters["steps"], + action=action_text, + 
reward=reward_value, + done=bool(done), + error=_last_action_error(observation, info if isinstance(info, dict) else {}), + ) + return observation, reward, done, info + + env.step = _logged_step + return rewards, counters, original_step + + +def _validate_required_configuration() -> None: + missing: list[str] = [] + + api_base = API_BASE_URL.strip() + model_name = MODEL_NAME.strip() + hf_token = HF_TOKEN.strip() + api_key = API_KEY.strip() + openai_key = OPENAI_API_KEY.strip() + + if not api_base or api_base == "": + missing.append("API_BASE_URL") + if not model_name or model_name == "": + missing.append("MODEL_NAME") + if not (hf_token or api_key or openai_key): + missing.append("HF_TOKEN|API_KEY|OPENAI_API_KEY") + + # Required when using docker-image based env construction. + if os.getenv("REQUIRE_LOCAL_IMAGE_NAME", "0").strip().lower() in {"1", "true", "yes", "on"}: + if not LOCAL_IMAGE_NAME.strip(): + missing.append("LOCAL_IMAGE_NAME") + + if missing: + raise RuntimeError(f"Missing required environment variables: {', '.join(sorted(set(missing)))}") + + +def _task_targets(env: OSINTEnvironment, episodes: int, task_indices: list[int]) -> list[int | None]: + if task_indices: + task_count = max(1, len(env.tasks)) + return [index % task_count for index in task_indices] + return [None] * max(1, episodes) + + +def _run_with_runner( + env: OSINTEnvironment, + llm: Any, + episodes: int, + task_indices: list[int], +) -> tuple[dict[str, Any], list[dict[str, Any]], list[float], int]: + metrics = EvalMetrics() + episode_rows: list[dict[str, Any]] = [] + rewards, counters, original_step = _install_step_logger(env) + + single_runner = SingleAgentRunner(env=env, llm=llm) + swarm_runner = SwarmAgentRunner(env=env, llm=llm) if env.config.swarm.enabled else None + + try: + for task_index in _task_targets(env, episodes, task_indices): + task_count = max(1, len(env.tasks)) + selected_index = env._task_idx % task_count if task_index is None else int(task_index) % task_count + if 
task_index is not None: + # Keep compatibility with explicit task selection from the previous inference script. + env._task_idx = selected_index + + difficulty = _task_difficulty(env, selected_index) + if difficulty == "easy": + runner: SingleAgentRunner | SwarmAgentRunner = single_runner + elif swarm_runner is not None: + runner = swarm_runner + else: + runner = single_runner + + info = runner.run_episode() + if env.state is None: + continue + + graph_f1 = compute_graph_f1(env.memory_graph.edges, env.state.task.supporting_edges) + metrics.add(info, task_type=env.state.task.task_type, graph_f1=graph_f1) + episode_rows.append(_episode_row(env, info)) + finally: + env.step = original_step + + return metrics.summary(), episode_rows, rewards, int(counters["steps"]) + + +def _maybe_write_artifacts( + env: OSINTEnvironment, + summary: dict[str, Any], + episodes: int, + episode_rows: list[dict[str, Any]], +) -> tuple[dict[str, Any] | None, str | None]: + if not WRITE_BENCHMARK_ARTIFACTS: + return None, None + + record = append_leaderboard_record( + path=LEADERBOARD_PATH, + summary=summary, + episodes=episodes, + run_name=RUN_NAME or None, + config={ + "seed": env.config.seed, + "max_steps": env.config.max_steps, + "swarm_enabled": env.config.swarm.enabled, + "max_agents": env.config.swarm.max_agents, + "max_breadth": env.config.swarm.max_breadth, + "max_width": env.config.swarm.max_width, + "max_depth": env.config.swarm.max_depth, + "seeded_questions": len(env.config.seeding.seeded_questions), + "llm_provider": env.config.llm.provider, + "llm_model": env.config.llm.model, + }, + ) + + leaderboard = load_leaderboard(LEADERBOARD_PATH) + dashboard = export_dashboard( + env=env, + evaluation={"summary": summary, "episodes": episode_rows}, + leaderboard_records=leaderboard, + output_path=DASHBOARD_PATH, + ) + return record, dashboard + + +def main() -> None: + _validate_required_configuration() + env_cfg = _resolve_environment_config() + llm_client = 
build_llm_client(env_cfg.llm) + + episodes_given = "EPISODES" in os.environ and str(os.getenv("EPISODES", "")).strip() != "" + task_indices_given = bool(TASK_INDICES) + + if not episodes_given and not task_indices_given: + runs: list[tuple[str, list[int], int]] = [ + ("easy", list(range(0, 10)), 10), + ("mid", list(range(10, 20)), 10), + ("hard", list(range(20, 30)), 10), + ] + else: + selected_indices = TASK_INDICES if task_indices_given else [] + episodes = len(selected_indices) if selected_indices else max(1, EPISODES) + runs = [(TASK_NAME, selected_indices, episodes)] + + for task_name, run_indices, run_episodes in runs: + env: OSINTEnvironment | None = None + rewards: list[float] = [] + steps_taken = 0 + score = 0.0 + success = False + + env = OSINTEnvironment(env_cfg, llm=llm_client) + log_start(task=task_name, env=BENCHMARK, model=env_cfg.llm.model) + + try: + summary, episode_rows, rewards, steps_taken = _run_with_runner( + env=env, + llm=llm_client, + episodes=run_episodes, + task_indices=run_indices, + ) + + score = float(summary.get("avg_reward", 0.0) or 0.0) + score = max(0.0, min(1.0, score)) + success = score >= SUCCESS_SCORE_THRESHOLD + + _maybe_write_artifacts( + env=env, + summary=summary, + episodes=run_episodes, + episode_rows=episode_rows, + ) + finally: + if env is not None: + close_fn = getattr(env, "close", None) + if callable(close_fn): + close_fn() + log_end(task=task_name, success=success, steps=steps_taken, score=score, rewards=rewards) + + +if __name__ == "__main__": + main() diff --git a/my_env_v4.py b/my_env_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..34037cd36e5d2ce3fcda2debc71e461bc07cbf6c --- /dev/null +++ b/my_env_v4.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass(slots=True) +class MyEnvV4Action: + message: str + + +@dataclass(slots=True) +class _EchoObservation: + echoed_message: str + + +@dataclass(slots=True) +class _EchoResult: + 
class MyEnvV4Env:
    """Minimal echo environment exercising the OpenEnv client protocol.

    Each ``step`` echoes the action's message back as the observation and
    pays 0.1 reward per character; episodes never terminate on their own.
    """

    def __init__(self) -> None:
        # Steps taken since the most recent reset.
        self._step_count = 0

    @classmethod
    async def from_docker_image(cls, image_name: str | None = None) -> "MyEnvV4Env":
        """Mimic the OpenEnv factory signature; the image name is accepted but unused."""
        return cls()

    async def reset(self) -> _EchoResult:
        """Begin a fresh episode with an empty echo and zero reward."""
        self._step_count = 0
        blank = _EchoObservation(echoed_message="")
        return _EchoResult(observation=blank, reward=0.0, done=False)

    async def step(self, action: MyEnvV4Action) -> _EchoResult:
        """Echo ``action.message``; reward is 0.1 per character, never done."""
        self._step_count += 1
        text = str(getattr(action, "message", ""))
        echoed = _EchoObservation(echoed_message=text)
        return _EchoResult(observation=echoed, reward=0.1 * len(text), done=False)

    async def close(self) -> None:
        """No resources are held, so closing is a no-op."""
        return None
+tasks: + - id: seed_task_0 + difficulty: easy + max_steps: 24 + grader: + type: difficulty_exact_match + answer_type: node_id + case_sensitive: true + reward_profile: easy + - id: seed_task_10 + difficulty: medium + max_steps: 24 + grader: + type: difficulty_exact_match + answer_type: node_id + case_sensitive: true + reward_profile: medium + - id: seed_task_20 + difficulty: hard + max_steps: 24 + grader: + type: difficulty_exact_match + answer_type: node_id + case_sensitive: true + reward_profile: hard +transport: + type: http + base_path: / +endpoints: + health: + method: GET + path: /health + metadata: + method: GET + path: /api/environment + tasks: + method: GET + path: /openenv/tasks + reset: + method: POST + path: /reset + step: + method: POST + path: /step + state: + method: GET + path: /state +models: + action_space: + - CALL_TOOL + - ADD_EDGE + - ANSWER + task_fields: + - task_id + - task_type + - question + - difficulty + - grader + observation_fields: + - tool_outputs + - graph_snapshot + - action_history + - task diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..cbc5681f4cffafedc7ffebec2b2a19b46864e30f --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,45 @@ +[project] +name = "osint-rl-env" +version = "0.1.0" +description = "OSINT-style multi-platform information ecosystem environment for LLM agents." 
+readme = "README.md" +requires-python = ">=3.10" +dependencies = [ + "openenv>=0.1.13", + "openai>=1.40.0", + "fastapi>=0.115.0", + "requests>=2.32.3", + "uvicorn>=0.30.0", +] + +[project.optional-dependencies] +dev = [ + "pytest>=8.0.0", +] +train = [ + "datasets>=2.20.0", + "transformers>=4.45.0", + "accelerate>=0.33.0", + "trl>=0.15.0", + "peft>=0.11.0", + "pillow", + "torchvision", + "wandb", +] + +[project.scripts] +osint-env = "osint_env.cli:main" +server = "osint_env.server_entry:main" + +[build-system] +requires = ["setuptools>=68", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools] +package-dir = {"" = "src"} + +[tool.setuptools.packages.find] +where = ["src"] + +[tool.pytest.ini_options] +testpaths = ["tests"] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..643151119118a7bf8a45288b6c93f9047e825990 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,11 @@ +openenv>=0.1.13 +openai>=1.40.0 +fastapi>=0.115.0 +requests>=2.32.3 +uvicorn>=0.30.0 +pytest>=8.0.0 +datasets>=2.20.0 +transformers>=4.45.0 +accelerate>=0.33.0 +trl>=0.15.0 +peft>=0.11.0 diff --git a/scripts/build_fixed_levels_dataset.py b/scripts/build_fixed_levels_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3e0ad295450f49db4b7071fa0c57ae4341891645 --- /dev/null +++ b/scripts/build_fixed_levels_dataset.py @@ -0,0 +1,197 @@ +from __future__ import annotations + +import argparse +import json +from collections import Counter +from dataclasses import asdict +from pathlib import Path +from typing import Any + +from osint_env.config import clone_environment_config, load_seeding_config, load_shared_config +from osint_env.data.generator import DatasetGenerator +from osint_env.domain.models import Edge, TaskInstance +from osint_env.llm import build_llm_client + + +def edge_to_dict(edge: Edge) -> dict[str, Any]: + return { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + 
"confidence": float(edge.confidence), + } + + +def task_to_dict(task: TaskInstance) -> dict[str, Any]: + return { + "task_id": task.task_id, + "task_type": task.task_type, + "question": task.question, + "answer": task.answer, + "supporting_edges": [edge_to_dict(e) for e in task.supporting_edges], + "metadata": dict(task.metadata), + } + + +def build_fixed_snapshot(seed_path: Path) -> dict[str, Any]: + seeding = load_seeding_config(seed_path) + fixed_nodes = [] + for node in seeding.seeded_nodes: + fixed_nodes.append( + { + "node_id": node.node_id, + "node_type": str(getattr(node.node_type, "value", node.node_type)), + "attrs": dict(node.attrs), + } + ) + fixed_edges = [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in seeding.seeded_edges + ] + fixed_questions = [] + for idx, q in enumerate(seeding.seeded_questions): + fixed_questions.append( + { + "task_id": f"fixed_task_{idx:02d}", + "task_type": q.task_type, + "question": q.question, + "answer": q.answer, + "supporting_edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in q.supporting_edges + ], + "metadata": dict(q.metadata), + } + ) + + difficulty_counts = Counter(str(q.get("metadata", {}).get("difficulty", "unknown")) for q in fixed_questions) + return { + "dataset_name": "fixed_levels_submission_set", + "source_seed": str(seed_path), + "graph": { + "nodes": fixed_nodes, + "edges": fixed_edges, + "node_count": len(fixed_nodes), + "edge_count": len(fixed_edges), + }, + "questions": fixed_questions, + "question_count": len(fixed_questions), + "difficulty_counts": dict(difficulty_counts), + } + + +def build_complete_snapshot(shared_config_path: Path, seed_path: Path) -> dict[str, Any]: + shared = load_shared_config(shared_config_path) + env_cfg = clone_environment_config(shared.environment) + env_cfg.seeding = load_seeding_config(seed_path) + + llm_client = 
build_llm_client(env_cfg.llm) + generator = DatasetGenerator(config=env_cfg, llm=llm_client) + + graph = generator.build_canonical_graph() + views = generator.build_platform_views(graph) + tasks = generator.generate_tasks(graph, views, count=max(15, len(env_cfg.seeding.seeded_questions))) + + difficulty_counts = Counter(str(task.metadata.get("difficulty", "unknown")) for task in tasks) + + return { + "dataset_name": "fixed_levels_submission_set", + "generation_mode": "llm_expanded", + "shared_config": str(shared_config_path), + "seed_file": str(seed_path), + "llm": asdict(env_cfg.llm), + "environment": { + "n_users": env_cfg.n_users, + "alias_density": env_cfg.alias_density, + "noise_level": env_cfg.noise_level, + "red_herring_rate": env_cfg.red_herring_rate, + "seed": env_cfg.seed, + }, + "canonical_graph": { + "node_count": len(graph.nodes), + "edge_count": len(graph.edges), + "nodes": [ + { + "node_id": node.node_id, + "node_type": node.node_type.value, + "attrs": dict(node.attrs), + } + for node in sorted(graph.nodes.values(), key=lambda n: n.node_id) + ], + "edges": [edge_to_dict(edge) for edge in graph.edges], + }, + "platform_views": { + "microblog_posts": views.microblog_posts, + "forum_threads": views.forum_threads, + "profiles": views.profiles, + "counts": { + "microblog_posts": len(views.microblog_posts), + "forum_threads": len(views.forum_threads), + "profiles": len(views.profiles), + }, + }, + "tasks": [task_to_dict(task) for task in tasks], + "task_count": len(tasks), + "difficulty_counts": dict(difficulty_counts), + } + + +def main() -> None: + parser = argparse.ArgumentParser(description="Build fixed difficulty dataset artifacts.") + parser.add_argument( + "--seed-file", + default="datasets/fixed_levels/seed_fixed_levels.json", + help="Path to seeding JSON with fixed graph/questions.", + ) + parser.add_argument( + "--shared-config", + default="datasets/fixed_levels/shared_config_fixed_levels.json", + help="Path to shared config used for LLM-expanded 
generation.", + ) + parser.add_argument( + "--output-dir", + default="datasets/fixed_levels", + help="Directory where dataset artifacts are written.", + ) + args = parser.parse_args() + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + seed_path = Path(args.seed_file) + shared_path = Path(args.shared_config) + + fixed_snapshot = build_fixed_snapshot(seed_path) + fixed_path = output_dir / "fixed_graph_questions.json" + fixed_path.write_text(json.dumps(fixed_snapshot, indent=2, sort_keys=True), encoding="utf-8") + + complete_snapshot = build_complete_snapshot(shared_path, seed_path) + complete_path = output_dir / "complete_dataset_qwen_generated.json" + complete_path.write_text(json.dumps(complete_snapshot, indent=2, sort_keys=True), encoding="utf-8") + + summary = { + "fixed_dataset": str(fixed_path), + "complete_dataset": str(complete_path), + "fixed_nodes": fixed_snapshot["graph"]["node_count"], + "fixed_edges": fixed_snapshot["graph"]["edge_count"], + "fixed_questions": fixed_snapshot["question_count"], + "complete_nodes": complete_snapshot["canonical_graph"]["node_count"], + "complete_edges": complete_snapshot["canonical_graph"]["edge_count"], + "complete_tasks": complete_snapshot["task_count"], + "difficulty_counts": complete_snapshot["difficulty_counts"], + } + print(json.dumps(summary, indent=2, sort_keys=True)) + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_fixed_levels_seed.py b/scripts/generate_fixed_levels_seed.py new file mode 100644 index 0000000000000000000000000000000000000000..65dc10da2dd52d3e6537adaa8c605a163900368a --- /dev/null +++ b/scripts/generate_fixed_levels_seed.py @@ -0,0 +1,109 @@ +from collections import Counter, OrderedDict +from pathlib import Path +import json + +U=[('aria','Aria Sen','Helios Labs','Sector 9'),('bharat','Bharat Kulkarni','Northbridge Logistics','Dockyard 17'),('cyrus','Cyrus Mehta','Apex Dynamics','Old Town'),('diya','Diya Roy','Blueharbor Media','Old 
Town'),('elin','Elin Das','Helios Labs','Sector 9'),('faris','Faris Noor','Tidewatch Ops','Rivergate'),('gita','Gita Pradhan','Apex Dynamics','Old Town'),('hiro','Hiro Tan','Northbridge Logistics','Dockyard 17'),('ivy','Ivy Kapoor','Kestrel Works','Rivergate'),('jules','Jules Banerjee','Blueharbor Media','Old Town'),('kian','Kian Bose','Atlas Freight','East Quay'),('leena','Leena Das','Sunmesh Analytics','Sector 9'),('mika','Mika Solanki','Orion Customs','North Basin'),('nora','Nora Iqbal','Emberline Security','Foundry Row'),('omar','Omar Sheikh','Atlas Freight','East Quay'),('priya','Priya Menon','Sunmesh Analytics','Sector 9'),('quinn','Quinn Rao','Orion Customs','North Basin'),('rhea','Rhea Kapoor','Emberline Security','Foundry Row'),('soren','Soren Malik','Harborlight Transit','Uplink Yard'),('tara','Tara Dey','Harborlight Transit','Uplink Yard')] +A=[('orchidfox','@orchidfox','ivy'),('steelquill','@steelquill','bharat'),('monsoonbyte','@monsoonbyte','diya'),('nightrelay','@nightrelay','faris'),('mapleghost','@mapleghost','elin'),('docksparrow','@docksparrow','hiro'),('quartzlotus','@quartzlotus','cyrus'),('emberglass','@emberglass','nora'),('basinraven','@basinraven','mika'),('tideshard','@tideshard','soren'),('hollowsignal','@hollowsignal','priya'),('ironwhisper','@ironwhisper','omar'),('cinderveil','@cinderveil','rhea'),('sablekeel','@sablekeel','tara'),('lanternmoth','@lanternmoth','kian'),('frostledger','@frostledger','leena')] +L=[('dockyard17','Dockyard 17'),('sector9','Sector 9'),('old_town','Old Town'),('rivergate','Rivergate'),('east_quay','East Quay'),('foundry_row','Foundry Row'),('north_basin','North Basin'),('uplink_yard','Uplink Yard')] +O=[('helios_labs','Helios Labs','sector9'),('northbridge_logistics','Northbridge Logistics','dockyard17'),('apex_dynamics','Apex Dynamics','old_town'),('blueharbor_media','Blueharbor Media','old_town'),('tidewatch_ops','Tidewatch Ops','rivergate'),('kestrel_works','Kestrel 
Works','rivergate'),('atlas_freight','Atlas Freight','east_quay'),('sunmesh_analytics','Sunmesh Analytics','sector9'),('orion_customs','Orion Customs','north_basin'),('emberline_security','Emberline Security','foundry_row'),('harborlight_transit','Harborlight Transit','uplink_yard')] +E=[('project_lantern','Project Lantern'),('black_kite','Black Kite'),('silent_current','Silent Current'),('amber_veil','Amber Veil'),('glass_harbor','Glass Harbor'),('ember_tide','Ember Tide'),('iron_wharf','Iron Wharf'),('ghost_signal','Ghost Signal')] +T=[('supply_leak','supply_chain'),('port_audit','port_audit'),('customs_breach','customs_breach'),('relay_map','relay_map'),('foundry_watch','foundry_watch'),('basin_shift','basin_shift'),('quiet_manifest','quiet_manifest'),('uplink_route','uplink_route'),('ember_tide_watch','ember_tide'),('ghost_signal_net','ghost_signal')] +P=['shift_roster','midnight_manifest','sat_phone_ping','drone_parts','relay_schedule','quay_ledgers','customs_tag','hull_signal','basin_photo','foundry_map','lantern_route','uplink_note'] + +def uid(x): return f'user_{x}' +def aid(x): return f'alias_{x}' +def oid(x): return f'org_{x}' +def lid(x): return f'loc_{x}' +def eid(x): return f'event_{x}' +def tid(x): return f'thr_{x}' +def pid(x): return f'post_{x}' + +def addn(nodes,nid,nt,attrs): nodes.append({'node_id':nid,'node_type':nt,'attrs':attrs}) + +def build(): + nodes=[]; edges=OrderedDict(); + for s,name,org,loc in U: addn(nodes,uid(s),'user',{'name':name,'org':org,'location':loc}) + for s,handle,user in A: addn(nodes,aid(s),'alias',{'handle':handle}) + for s,name,_ in O: addn(nodes,oid(s),'org',{'name':name}) + for s,name in L: addn(nodes,lid(s),'location',{'name':name}) + for s,name in E: addn(nodes,eid(s),'event',{'name':name}) + for s,topic in T: addn(nodes,tid(s),'thread',{'topic':topic}) + for s in P: addn(nodes,pid(s),'post',{'channel':'microblog'}) + def ae(k,src,rel,dst,c=1.0): edges[k]={'src':src,'rel':rel,'dst':dst,'confidence':c} + for s,_,user 
in A: ae(f'a_{s}',aid(s),'alias_of',uid(user)) + org_map={name:oid(s) for s,name,_ in O}; loc_map={name:lid(s) for s,name in L} + for s,_,org,loc in U: ae(f'w_{s}',uid(s),'works_at',org_map[org]); ae(f'l_{s}',uid(s),'located_in',loc_map[loc]) + for s,_,loc in O: ae(f'op_{s}',oid(s),'operates_in',lid(loc)) + CP=[('ivy','bharat',.95),('bharat','hiro',.95),('hiro','faris',.92),('faris','diya',.90),('diya','elin',.89),('elin','aria',.87),('aria','cyrus',.84),('cyrus','gita',.83),('gita','jules',.82),('jules','bharat',.81),('diya','ivy',.90),('ivy','elin',.86),('kian','omar',.93),('omar','mika',.90),('mika','quinn',.89),('quinn','nora',.88),('nora','rhea',.87),('rhea','soren',.86),('soren','tara',.86),('tara','kian',.84),('priya','leena',.91),('leena','aria',.83),('priya','nora',.82),('kian','bharat',.80),('soren','faris',.79),('quinn','hiro',.78)] + for i,(a,b,c) in enumerate(CP,1): ae(f'c{i:02d}',uid(a),'connected_to',uid(b),c) + PA={'midnight_manifest':'orchidfox','shift_roster':'docksparrow','sat_phone_ping':'nightrelay','drone_parts':'monsoonbyte','relay_schedule':'steelquill','quay_ledgers':'lanternmoth','customs_tag':'basinraven','hull_signal':'tideshard','basin_photo':'emberglass','foundry_map':'cinderveil','lantern_route':'frostledger','uplink_note':'sablekeel'} + for post,author in PA.items(): ae(f'ap_{post}',aid(author),'authored_post',pid(post)) + PR={'midnight_manifest':['dockyard17','project_lantern'],'shift_roster':['dockyard17','northbridge_logistics'],'sat_phone_ping':['rivergate','project_lantern'],'drone_parts':['black_kite','kestrel_works'],'relay_schedule':['project_lantern','sector9'],'quay_ledgers':['east_quay','glass_harbor'],'customs_tag':['north_basin','iron_wharf'],'hull_signal':['uplink_yard','ghost_signal'],'basin_photo':['foundry_row','amber_veil'],'foundry_map':['foundry_row','ember_tide'],'lantern_route':['project_lantern','sunmesh_analytics'],'uplink_note':['uplink_yard','harborlight_transit']} + for post,refs in PR.items(): + for i,x in 
enumerate(refs,1): ae(f'r_{post}_{i}',pid(post),'references', lid(x) if x in {y for y,_ in L} else (oid(x) if x in {y for y,_,_ in O} else eid(x))) + TA={'supply_leak':'diya','port_audit':'jules','customs_breach':'mika','relay_map':'leena','foundry_watch':'nora','basin_shift':'quinn','quiet_manifest':'kian','uplink_route':'soren','ember_tide_watch':'rhea','ghost_signal_net':'tara'} + TL={'supply_leak':[('discusses','project_lantern'),('references','northbridge_logistics')],'port_audit':[('discusses','black_kite'),('references','kestrel_works')],'customs_breach':[('discusses','iron_wharf'),('references','orion_customs')],'relay_map':[('discusses','project_lantern'),('references','sunmesh_analytics')],'foundry_watch':[('discusses','ember_tide'),('references','emberline_security')],'basin_shift':[('discusses','amber_veil'),('references','north_basin')],'quiet_manifest':[('discusses','glass_harbor'),('references','atlas_freight')],'uplink_route':[('discusses','ghost_signal'),('references','harborlight_transit')],'ember_tide_watch':[('discusses','ember_tide'),('references','foundry_row')],'ghost_signal_net':[('discusses','ghost_signal'),('references','uplink_yard')]} + for t,u in TA.items(): ae(f'at_{t}',uid(u),'authored_thread',tid(t)) + for t,rels in TL.items(): + for i,(rel,x) in enumerate(rels,1): ae(f'tl_{t}_{i}',tid(t),rel, lid(x) if x in {y for y,_ in L} else (oid(x) if x in {y for y,_,_ in O} else eid(x))) + 
ER=[('bharat','collaborates_on','project_lantern'),('hiro','collaborates_on','project_lantern'),('faris','collaborates_on','project_lantern'),('diya','investigates','project_lantern'),('leena','monitors','project_lantern'),('ivy','collaborates_on','black_kite'),('cyrus','collaborates_on','black_kite'),('elin','investigates','black_kite'),('jules','reports_on','black_kite'),('kian','collaborates_on','glass_harbor'),('omar','collaborates_on','glass_harbor'),('priya','monitors','glass_harbor'),('mika','collaborates_on','iron_wharf'),('quinn','collaborates_on','iron_wharf'),('nora','investigates','amber_veil'),('rhea','collaborates_on','ember_tide'),('soren','collaborates_on','ghost_signal'),('tara','reports_on','ghost_signal'),('gita','monitors','silent_current'),('jules','reports_on','silent_current')] + for i,(u,rel,e) in enumerate(ER,1): ae(f'er{i:02d}',uid(u),rel,eid(e),.9) + X=[(eid('project_lantern'),'connected_to',eid('glass_harbor')),(eid('black_kite'),'connected_to',eid('amber_veil')),(eid('ember_tide'),'connected_to',eid('ghost_signal')),(oid('atlas_freight'),'connected_to',oid('northbridge_logistics')),(oid('orion_customs'),'connected_to',oid('emberline_security')),(oid('harborlight_transit'),'connected_to',oid('tidewatch_ops'))] + for i,(a,rel,b) in enumerate(X,1): ae(f'x{i:02d}',a,rel,b,.77) + return nodes,edges + +def mk_questions(edges): + def ids(*items): + out=[] + for it in items: + if isinstance(it,list): out.extend(it) + else: out.append(it) + return out + def rng(prefix,a,b): return [f'{prefix}{i:02d}' for i in range(a,b+1)] + def sup(edge_ids): return [edges[e] for e in edge_ids] + def nodes(edge_ids): + s=set() + for e in edge_ids: s|={edges[e]['src'],edges[e]['dst']} + return len(s) + qs=[] + easy=[('easy_01','alias_orchidfox -> post_midnight_manifest -> loc_dockyard17 -> connected collaborator on event_project_lantern. 
Who is it?','user_bharat',ids('a_orchidfox','ap_midnight_manifest','r_midnight_manifest_1','c01','er01')),('easy_02','thr_supply_leak references org_northbridge_logistics. Which alias_docksparrow user works there and collaborates on event_project_lantern?','user_hiro',ids('tl_supply_leak_2','a_docksparrow','w_hiro','er02')),('easy_03','alias_monsoonbyte authored post_drone_parts about event_black_kite. Which user behind that alias is directly connected to the Kestrel collaborator?','user_diya',ids('a_monsoonbyte','ap_drone_parts','r_drone_parts_1','w_ivy','er06','c12')),('easy_04','alias_nightrelay references loc_rivergate. Which user behind it works at an org operating there and collaborates on event_project_lantern?','user_faris',ids('a_nightrelay','ap_sat_phone_ping','r_sat_phone_ping_1','w_faris','op_tidewatch_ops','er03')),('easy_05','thr_port_audit discusses Black Kite and references Kestrel Works. Which alias_orchidfox user authored post_midnight_manifest and collaborates on Black Kite?','user_ivy',ids('tl_port_audit_1','tl_port_audit_2','a_orchidfox','ap_midnight_manifest','w_ivy','er06')),('easy_06','Which Atlas Freight user behind alias_lanternmoth authored post_quay_ledgers and collaborates on event_glass_harbor?','user_kian',ids('a_lanternmoth','ap_quay_ledgers','w_kian','er10')),('easy_07','Which Orion Customs user behind alias_basinraven authored post_customs_tag and collaborates on event_iron_wharf?','user_mika',ids('a_basinraven','ap_customs_tag','w_mika','er13')),('easy_08','Which user behind alias_emberglass posted basin_photo from Foundry Row and investigates Amber Veil?','user_nora',ids('a_emberglass','ap_basin_photo','r_basin_photo_1','er15')),('easy_09','Which user behind alias_tideshard authored post_hull_signal and collaborates on Ghost Signal?','user_soren',ids('a_tideshard','ap_hull_signal','er17')),('easy_10','Which Harborlight Transit user behind alias_sablekeel authored post_uplink_note and reports on Ghost 
Signal?','user_tara',ids('a_sablekeel','ap_uplink_note','w_tara','er18'))] + mid=[('mid_01','Follow alias_docksparrow through post_shift_roster, Dockyard 17, and the Lantern chain. Return the org node id.','org_northbridge_logistics',ids('a_docksparrow','ap_shift_roster','r_shift_roster_1','r_shift_roster_2','tl_supply_leak_2','w_hiro','l_hiro','er02','er01','c02','c03')),('mid_02','Across the Glass Harbor cluster, which user behind alias_lanternmoth links to the Atlas Freight network from thr_quiet_manifest?','user_kian',ids('a_lanternmoth','ap_quay_ledgers','r_quay_ledgers_1','r_quay_ledgers_2','at_quiet_manifest','tl_quiet_manifest_1','tl_quiet_manifest_2','w_kian','w_omar','er10','er11','er12','c13','c14')),('mid_03','Trace alias_basinraven through post_customs_tag, thr_customs_breach, and the Orion Customs collaboration chain. Who is it?','user_mika',ids('a_basinraven','ap_customs_tag','r_customs_tag_1','r_customs_tag_2','at_customs_breach','tl_customs_breach_1','tl_customs_breach_2','w_mika','w_quinn','er13','er14','c15','c16','x05')),('mid_04','In the Ember Tide and Amber Veil overlap, which Foundry Row user behind alias_cinderveil collaborates on Ember Tide?','user_rhea',ids('a_cinderveil','ap_foundry_map','r_foundry_map_1','r_foundry_map_2','at_foundry_watch','tl_foundry_watch_1','tl_foundry_watch_2','at_ember_tide_watch','tl_ember_tide_watch_1','tl_ember_tide_watch_2','w_rhea','w_nora','er15','er16','c17','x03')),('mid_05','Follow alias_tideshard from post_hull_signal into thr_uplink_route and the Harborlight relay. 
Return the org node id.','org_harborlight_transit',ids('a_tideshard','ap_hull_signal','r_hull_signal_1','r_hull_signal_2','at_uplink_route','tl_uplink_route_1','tl_uplink_route_2','w_soren','w_tara','er17','er18','c18','c19','op_harborlight_transit','x06')),('mid_06','Which Sunmesh user behind alias_frostledger connects post_lantern_route to thr_relay_map and the Sector 9 monitoring chain?','user_leena',ids('a_frostledger','ap_lantern_route','r_lantern_route_1','r_lantern_route_2','at_relay_map','tl_relay_map_1','tl_relay_map_2','w_leena','w_priya','l_leena','op_sunmesh_analytics','er05','c21','c22')),('mid_07','Which user behind alias_emberglass is tied to Amber Veil after combining post_basin_photo, thr_basin_shift, and the Foundry Row investigation chain?','user_nora',ids('a_emberglass','ap_basin_photo','r_basin_photo_1','r_basin_photo_2','at_basin_shift','tl_basin_shift_1','tl_basin_shift_2','w_nora','w_quinn','l_nora','er15','c16','c17','x05')),('mid_08','Combine alias_orchidfox, post_midnight_manifest, thr_supply_leak, and the Lantern to Glass Harbor bridge. 
Which user starts that chain?','user_ivy',ids('a_orchidfox','ap_midnight_manifest','r_midnight_manifest_1','r_midnight_manifest_2','at_supply_leak','tl_supply_leak_1','tl_supply_leak_2','w_ivy','er06','c01','c12','x01','er10','er12')),('mid_09','Which user behind alias_monsoonbyte sits at the overlap of Blueharbor Media, Project Lantern, Black Kite, and the Ivy connection chain?','user_diya',ids('a_monsoonbyte','ap_drone_parts','r_drone_parts_1','at_supply_leak','tl_supply_leak_1','at_port_audit','tl_port_audit_1','w_diya','w_ivy','w_jules','er04','er06','er09','c04','c12')),('mid_10','Who is the Northbridge user behind alias_steelquill when combining post_relay_schedule, thr_supply_leak, Dockyard 17, and Lantern collaborator edges?','user_bharat',ids('a_steelquill','ap_relay_schedule','r_relay_schedule_1','r_relay_schedule_2','at_supply_leak','tl_supply_leak_1','tl_supply_leak_2','w_bharat','w_hiro','l_bharat','l_hiro','er01','er02','c01','c02'))] + big=list(edges.keys())[:58] + hard=[('high_01','Lantern to Glass Harbor handoff: identify the user behind alias_orchidfox after combining Lantern logistics, Dockyard links, and Atlas Freight bridge evidence.','user_ivy',ids('a_orchidfox','ap_midnight_manifest','r_midnight_manifest_1','r_midnight_manifest_2','at_supply_leak','tl_supply_leak_1','tl_supply_leak_2',['w_ivy','w_bharat','w_hiro','w_kian','w_omar'],['l_ivy','l_bharat','l_hiro','l_kian','l_omar'],['op_northbridge_logistics','op_kestrel_works','op_atlas_freight'],rng('c',1,3),['c12','c13','c14'],['er01','er02','er03','er06','er10','er11','er12'],'at_quiet_manifest','tl_quiet_manifest_1','tl_quiet_manifest_2','ap_quay_ledgers','r_quay_ledgers_1','r_quay_ledgers_2','x01','x04','a_lanternmoth','a_steelquill','a_docksparrow')),('high_02','North Basin to Foundry Row escalation: which user behind alias_basinraven anchors the Iron Wharf side before the Emberline 
handoff?','user_mika',ids('a_basinraven','ap_customs_tag','r_customs_tag_1','r_customs_tag_2','at_customs_breach','tl_customs_breach_1','tl_customs_breach_2','at_basin_shift','tl_basin_shift_1','tl_basin_shift_2','at_foundry_watch','tl_foundry_watch_1','tl_foundry_watch_2',['w_mika','w_quinn','w_nora','w_rhea'],['l_mika','l_quinn','l_nora','l_rhea'],['op_orion_customs','op_emberline_security'],['c15','c16','c17'],['er13','er14','er15','er16'],'ap_basin_photo','r_basin_photo_1','r_basin_photo_2','ap_foundry_map','r_foundry_map_1','r_foundry_map_2','x02','x03','x05','a_emberglass','a_cinderveil','c23','c24')),('high_03','Harborlight ghost-signal relay: identify the user behind alias_tideshard at the Harborlight / Tidewatch junction.','user_soren',ids('a_tideshard','ap_hull_signal','r_hull_signal_1','r_hull_signal_2','a_sablekeel','ap_uplink_note','r_uplink_note_1','r_uplink_note_2','at_uplink_route','tl_uplink_route_1','tl_uplink_route_2','at_ghost_signal_net','tl_ghost_signal_net_1','tl_ghost_signal_net_2',['w_soren','w_tara','w_faris'],['l_soren','l_tara','l_faris'],['op_harborlight_transit','op_tidewatch_ops'],['c18','c19','c20','c25'],['er03','er17','er18'],'ap_sat_phone_ping','r_sat_phone_ping_1','r_sat_phone_ping_2','at_supply_leak','tl_supply_leak_1','er01','er02','x03','x06','a_nightrelay')),('high_04','Blueharbor to Black Kite to Lantern overlap: which user is the Blueharbor origin behind alias_monsoonbyte?','user_diya',ids('a_monsoonbyte','ap_drone_parts','r_drone_parts_1','r_drone_parts_2','at_port_audit','tl_port_audit_1','tl_port_audit_2','at_supply_leak','tl_supply_leak_1','tl_supply_leak_2',['w_diya','w_jules','w_ivy','w_cyrus'],['l_diya','l_jules','l_ivy','l_cyrus'],['op_blueharbor_media','op_kestrel_works','op_apex_dynamics'],['c04','c08','c09','c12'],['er04','er06','er07','er08','er09'],'a_orchidfox','ap_midnight_manifest','r_midnight_manifest_2','x01','x02','at_relay_map','tl_relay_map_1','w_leena','er05')),('high_05','Sector 9 to Dockyard 17 full 
relay: which user behind alias_steelquill links the Northbridge chain and the Sunmesh monitoring bridge?','user_bharat',ids('a_steelquill','ap_relay_schedule','r_relay_schedule_1','r_relay_schedule_2','a_frostledger','ap_lantern_route','r_lantern_route_1','r_lantern_route_2','at_relay_map','tl_relay_map_1','tl_relay_map_2','at_supply_leak','tl_supply_leak_1','tl_supply_leak_2',['w_bharat','w_hiro','w_leena','w_priya','w_aria'],['l_bharat','l_hiro','l_leena','l_priya','l_aria'],['op_northbridge_logistics','op_sunmesh_analytics','op_helios_labs'],['c01','c02','c05','c06','c07','c21','c22'],['er01','er02','er05'],'x01','x04','a_docksparrow','a_mapleghost','a_hollowsignal')),('high_06','Foundry Row, North Basin, and Uplink Yard spread: identify the user behind alias_emberglass before the Harborlight relay takes over.','user_nora',ids('a_emberglass','ap_basin_photo','r_basin_photo_1','r_basin_photo_2','a_cinderveil','ap_foundry_map','r_foundry_map_1','r_foundry_map_2','a_sablekeel','ap_uplink_note','r_uplink_note_1','r_uplink_note_2','at_foundry_watch','tl_foundry_watch_1','tl_foundry_watch_2','at_ember_tide_watch','tl_ember_tide_watch_1','tl_ember_tide_watch_2','at_uplink_route','tl_uplink_route_1','tl_uplink_route_2',['w_nora','w_rhea','w_soren','w_tara'],['l_nora','l_rhea','l_soren','l_tara'],['op_emberline_security','op_harborlight_transit'],['c17','c18','c19'],['er15','er16','er17','er18'],'x03','x06')),('high_07','Freight and customs bridge: which Atlas Freight user behind alias_lanternmoth connects Glass Harbor with the Northbridge 
chain?','user_kian',ids('a_lanternmoth','ap_quay_ledgers','r_quay_ledgers_1','r_quay_ledgers_2','at_quiet_manifest','tl_quiet_manifest_1','tl_quiet_manifest_2',['w_kian','w_omar','w_bharat','w_hiro'],['l_kian','l_omar','l_bharat','l_hiro'],['op_atlas_freight','op_northbridge_logistics'],['c13','c14','c24','c02'],['er10','er11','er12','er01','er02'],'ap_shift_roster','r_shift_roster_1','r_shift_roster_2','ap_midnight_manifest','r_midnight_manifest_1','at_supply_leak','tl_supply_leak_2','x04','a_ironwhisper','a_steelquill','a_docksparrow')),('high_08','Black Kite, Amber Veil, and Iron Wharf overlap: which user behind alias_quartzlotus is the Apex-side collaborator?','user_cyrus',ids('a_quartzlotus','w_cyrus','l_cyrus','op_apex_dynamics','er07','at_port_audit','tl_port_audit_1','ap_drone_parts','r_drone_parts_1','er15','at_basin_shift','tl_basin_shift_1','er13','at_customs_breach','tl_customs_breach_1',['w_ivy','w_nora','w_mika','w_quinn'],['l_ivy','l_nora','l_mika','l_quinn'],['op_kestrel_works','op_emberline_security','op_orion_customs'],['c08','c12','c15','c16','c17'],'x02','x05','a_orchidfox','a_basinraven','a_emberglass')),('high_09','Ghost Signal and Ember Tide relay: which user behind alias_sablekeel is the Harborlight reporting endpoint?','user_tara',ids('a_sablekeel','ap_uplink_note','r_uplink_note_1','r_uplink_note_2','a_tideshard','ap_hull_signal','r_hull_signal_1','r_hull_signal_2','at_ghost_signal_net','tl_ghost_signal_net_1','tl_ghost_signal_net_2','at_uplink_route','tl_uplink_route_1','tl_uplink_route_2','at_ember_tide_watch','tl_ember_tide_watch_1','tl_ember_tide_watch_2',['w_tara','w_soren','w_rhea','w_nora'],['l_tara','l_soren','l_rhea','l_nora'],['op_harborlight_transit','op_emberline_security'],['c18','c19','c17'],['er16','er17','er18'],'x03','x06','a_cinderveil','a_emberglass')),('high_10','End-to-end benchmark sweep: across Lantern, Black Kite, Glass Harbor, Iron Wharf, Ember Tide, and Ghost Signal, which user behind alias_hollowsignal anchors 
the Sunmesh monitoring side?','user_priya',big)] + for diff,level,specs in [('easy',1,easy),('mid',2,mid),('high',3,hard)]: + for qid,q,a,eids in specs: + qs.append({'task_type':'fixed_trace','question':q,'answer':a,'supporting_edges':sup(eids),'metadata':{'difficulty':diff,'difficulty_level':level,'question_id':qid,'support_nodes':nodes(eids)}}) + def edge_key(e): return (e['src'], e['rel'], e['dst']) + mid_pool = sup(ids('a_orchidfox','ap_midnight_manifest','r_midnight_manifest_1','r_midnight_manifest_2','a_lanternmoth','ap_quay_ledgers','r_quay_ledgers_1','r_quay_ledgers_2','a_basinraven','ap_customs_tag','r_customs_tag_1','r_customs_tag_2','a_tideshard','ap_hull_signal','r_hull_signal_1','at_supply_leak','tl_supply_leak_1','at_quiet_manifest','tl_quiet_manifest_1','er01','er02','er06','er10','c01','c02','c13')) + hard_pool = sup(list(edges.keys())[:120]) + for q in qs: + current = {edge_key(e) for e in q['supporting_edges']} + diff = q['metadata']['difficulty'] + if diff == 'mid': + pool = mid_pool + target = 17 + elif diff == 'high': + pool = hard_pool + target = 50 + else: + continue + for e in pool: + if q['metadata']['support_nodes'] >= target: + break + k = edge_key(e) + if k not in current: + q['supporting_edges'].append(dict(e)) + current.add(k) + q['metadata']['support_nodes'] = len({n for edge in q['supporting_edges'] for n in (edge['src'], edge['dst'])}) + return qs + +def main(): + nodes,edges=build(); questions=mk_questions(edges) + payload={'seeding':{'seeded_nodes':nodes,'seeded_edges':list(edges.values()),'seeded_questions':questions,'llm_generate_remaining_graph':True,'llm_generate_remaining_tasks':False,'llm_generated_edge_budget':48,'llm_generated_task_budget':0,'llm_generation_parallel':True,'llm_generation_workers':4,'llm_generation_retries':3,'allow_template_fallback_on_llm_failure':False}} + out=Path('datasets/fixed_levels/seed_fixed_levels.json'); out.write_text(json.dumps(payload,indent=2),encoding='utf-8') + 
import argparse
import json
import os


def build_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the reproducible OpenAI baseline run.

    All defaults target the fixed-level benchmark artifacts so a bare
    invocation reproduces the published baseline.
    """
    parser = argparse.ArgumentParser(description="Run the reproducible OpenAI baseline on the fixed-level OSINT benchmark.")
    parser.add_argument("--config", default="datasets/fixed_levels/shared_config_fixed_levels.json", help="Shared config JSON.")
    parser.add_argument("--seed-file", default="datasets/fixed_levels/seed_fixed_levels.json", help="Fixed seed file JSON.")
    parser.add_argument("--output", default="artifacts/baselines/openai_fixed_levels_latest.json", help="Baseline result JSON output path.")
    parser.add_argument("--leaderboard", default="artifacts/baselines/openai_fixed_levels_leaderboard.json", help="Leaderboard JSON path.")
    parser.add_argument("--dashboard", default="artifacts/baselines/openai_fixed_levels_dashboard.html", help="Dashboard HTML path.")
    parser.add_argument("--run-name", default="openai_fixed_levels_baseline", help="Leaderboard run name.")
    parser.add_argument("--model", default="gpt-5-nano", help="OpenAI chat model name.")
    parser.add_argument("--openai-base-url", default="https://api.openai.com/v1", help="OpenAI-compatible base URL.")
    parser.add_argument("--openai-api-key", default="", help="OpenAI API key override.")
    parser.add_argument("--openai-api-key-env", default="OPENAI_API_KEY", help="Environment variable name for the API key.")
    parser.add_argument("--episodes", type=int, default=30, help="Number of episodes to evaluate.")
    parser.add_argument("--max-steps", type=int, default=8, help="Episode step budget to keep runs bounded.")
    parser.add_argument("--temperature", type=float, default=0.0, help="Sampling temperature.")
    parser.add_argument("--max-tokens", type=int, default=256, help="Maximum completion tokens per step.")
    parser.add_argument("--timeout-seconds", type=int, default=60, help="Per-request timeout.")
    parser.add_argument("--seed", type=int, default=7, help="Request seed offset used for repeatable runs.")
    parser.add_argument("--skip-leaderboard", action="store_true", help="Do not append the run to the leaderboard file.")
    return parser


def main() -> None:
    """Parse CLI arguments, run the baseline, and print a JSON summary.

    The explicit --openai-api-key flag wins over the environment variable
    named by --openai-api-key-env.
    """
    # Fix: imported lazily so this module (argument parsing, `--help`) stays
    # importable and testable even when the osint_env package is not installed.
    from osint_env.baselines import OpenAIBaselineConfig, OpenAIBaselineRunner

    args = build_parser().parse_args()
    api_key = args.openai_api_key or os.getenv(args.openai_api_key_env, "")
    config = OpenAIBaselineConfig(
        shared_config_path=args.config,
        seed_file=args.seed_file,
        output_path=args.output,
        leaderboard_path=args.leaderboard,
        dashboard_path=args.dashboard,
        run_name=args.run_name,
        model=args.model,
        base_url=args.openai_base_url,
        api_key=api_key,
        api_key_env=args.openai_api_key_env,
        temperature=args.temperature,
        max_tokens=args.max_tokens,
        timeout_seconds=args.timeout_seconds,
        episodes=args.episodes,
        max_steps=args.max_steps,
        seed=args.seed,
        append_leaderboard=not args.skip_leaderboard,
    )
    result = OpenAIBaselineRunner(config).run()
    print(json.dumps({"summary": result["summary"], "output": args.output, "dashboard": args.dashboard}, indent=2, sort_keys=True))


if __name__ == "__main__":
    main()
b/scripts/space_start.sh @@ -0,0 +1,35 @@ +#!/bin/sh +set -eu + +_is_true() { + case "${1:-}" in + 1|true|TRUE|yes|YES|on|ON) return 0 ;; + *) return 1 ;; + esac +} + +ENV_CONFIG_PATH="${TRAIN_ENV_CONFIG_PATH:-config/shared_config.json}" +TRAIN_CONFIG_PATH="${TRAIN_SELF_PLAY_CONFIG_PATH:-config/self_play_training_hf_a10g_smoke.json}" +RUN_FLAG="${RUN_SELF_PLAY_TRAINING:-0}" +DRY_RUN_FLAG="${RUN_SELF_PLAY_DRY_RUN:-0}" + +if _is_true "$RUN_FLAG"; then + echo "[space_start] RUN_SELF_PLAY_TRAINING enabled." + echo "[space_start] Training start: $(date -u +"%Y-%m-%dT%H:%M:%SZ")" + echo "[space_start] Env config: ${ENV_CONFIG_PATH}" + echo "[space_start] Train config: ${TRAIN_CONFIG_PATH}" + if _is_true "$DRY_RUN_FLAG"; then + echo "[space_start] Running self-play in dry-run mode." + osint-env train-self-play --config "${ENV_CONFIG_PATH}" --train-config "${TRAIN_CONFIG_PATH}" --dry-run + else + echo "[space_start] Running self-play training." + osint-env train-self-play --config "${ENV_CONFIG_PATH}" --train-config "${TRAIN_CONFIG_PATH}" + fi + echo "[space_start] Self-play command completed." + echo "[space_start] Training end: $(date -u +"%Y-%m-%dT%H:%M:%SZ")" +else + echo "[space_start] RUN_SELF_PLAY_TRAINING disabled. Skipping self-play run." +fi + +echo "[space_start] Starting API server." 
+exec uvicorn server:app --host 0.0.0.0 --port "${PORT:-7860}" diff --git a/scripts/test_ollama_space.py b/scripts/test_ollama_space.py new file mode 100644 index 0000000000000000000000000000000000000000..540f624cd684fc5e026544e36aec61811d6e6436 --- /dev/null +++ b/scripts/test_ollama_space.py @@ -0,0 +1,185 @@ +from __future__ import annotations + +import json +import os +import sys +from typing import Any + +import requests + +from osint_env.baselines.openai_runner import SYSTEM_PROMPT, build_action_tools +from osint_env.llm.interface import OllamaLLMClient + +SPACE_URL = os.getenv("SPACE_URL", "https://siddeshwar1625-osint.hf.space").rstrip("/") +OLLAMA_BASE = os.getenv("OLLAMA_BASE_URL", "http://127.0.0.1:11434").rstrip("/") +MODEL = os.getenv("OLLAMA_MODEL", "qwen3:2b") +MAX_STEPS = int(os.getenv("MAX_STEPS", "8")) +REQUEST_TIMEOUT = int(os.getenv("REQUEST_TIMEOUT", "90")) +TASK_INDICES = [int(x.strip()) for x in os.getenv("TASK_INDICES", "0").split(",") if x.strip()] + + +def _message_text(message: Any) -> str: + content = getattr(message, "content", "") + if isinstance(content, str): + return content + if isinstance(content, list): + parts: list[str] = [] + for item in content: + if isinstance(item, dict) and item.get("type") == "text": + parts.append(str(item.get("text", ""))) + return "\n".join(part for part in parts if part) + return str(content or "") + + +def _assistant_tool_call_id(message: dict[str, Any]) -> str | None: + tool_calls = list(message.get("tool_calls", [])) + if not tool_calls: + return None + tool_call_id = tool_calls[0].get("id") + return str(tool_call_id) if tool_call_id else None + + +def _tool_result_message(assistant_message: dict[str, Any], result: dict[str, Any]) -> dict[str, Any] | None: + tool_call_id = _assistant_tool_call_id(assistant_message) + if not tool_call_id: + return None + return { + "role": "tool", + "tool_call_id": tool_call_id, + "content": json.dumps(result, sort_keys=True), + } + + +def _decode_action(tool_name: 
str, args: dict[str, Any]) -> dict[str, Any]: + if tool_name == "submit_answer": + return {"action_type": "ANSWER", "payload": {"answer": str(args.get("answer", "")).strip()}} + if tool_name == "add_edge": + return { + "action_type": "ADD_EDGE", + "payload": { + "src": str(args.get("src", "")).strip(), + "rel": str(args.get("rel", "")).strip(), + "dst": str(args.get("dst", "")).strip(), + "confidence": float(args.get("confidence", 1.0)), + }, + } + return {"action_type": "CALL_TOOL", "payload": {"tool_name": tool_name, "args": dict(args)}} + + +def _format_action(action: dict[str, Any]) -> str: + action_type = str(action.get("action_type", "")) + payload = dict(action.get("payload", {})) + if action_type == "ANSWER": + return f"answer({payload.get('answer', 'unknown')})" + if action_type == "ADD_EDGE": + return ( + "add_edge(" + f"{payload.get('src', '')}," + f"{payload.get('rel', '')}," + f"{payload.get('dst', '')}," + f"{float(payload.get('confidence', 1.0)):.2f}" + ")" + ) + tool_name = str(payload.get("tool_name", "tool")) + args = dict(payload.get("args", {})) + if not args: + return f"{tool_name}()" + arg_str = ",".join(f"{key}={value}" for key, value in sorted(args.items())) + return f"{tool_name}({arg_str})" + + +def get_model_action(client: OllamaLLMClient, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> tuple[dict[str, Any], dict[str, Any]]: + llm_resp = client.generate(messages, tools) + content = llm_resp.content or "" + tool_calls = list(llm_resp.tool_calls or []) + if not tool_calls: + return {"action_type": "ANSWER", "payload": {"answer": content.strip() or "unknown"}}, { + "role": "assistant", + "content": content, + } + + tool_call = tool_calls[0] + tool_name = str(tool_call.get("tool_name", "")) + args = dict(tool_call.get("args", {})) + assistant_message = { + "role": "assistant", + "content": content, + "tool_calls": [ + { + "id": "local", + "type": "function", + "function": {"name": tool_name, "arguments": json.dumps(args, 
sort_keys=True)}, + } + ], + } + return _decode_action(tool_name, args), assistant_message + + +def main() -> None: + try: + ping = requests.get(f"{SPACE_URL}/healthz", timeout=REQUEST_TIMEOUT) + ping.raise_for_status() + print(f"Space health: {ping.json()}") + except Exception as exc: + raise SystemExit(f"Space health check failed: {exc}") from exc + + client = OllamaLLMClient(model=MODEL, base_url=OLLAMA_BASE, timeout_seconds=REQUEST_TIMEOUT) + tools = build_action_tools() + + for task_index in TASK_INDICES: + print(f"Resetting task {task_index} via {SPACE_URL}/openenv/reset") + resp = requests.post(f"{SPACE_URL}/openenv/reset", json={"task_index": task_index}, timeout=REQUEST_TIMEOUT) + resp.raise_for_status() + data = resp.json() + session_id = str(data.get("session_id")) + observation = data.get("observation", {}) + + messages: list[dict[str, Any]] = [ + {"role": "system", "content": SYSTEM_PROMPT}, + {"role": "user", "content": json.dumps(observation, indent=2, sort_keys=True)}, + ] + + done = bool(data.get("done", False)) + step = 0 + rewards: list[float] = [] + + while not done and step < MAX_STEPS: + step += 1 + action, assistant_message = get_model_action(client, messages, tools) + error = None + try: + result = requests.post( + f"{SPACE_URL}/openenv/step", + json={ + "session_id": session_id, + "action_type": action["action_type"], + "payload": action["payload"], + }, + timeout=REQUEST_TIMEOUT, + ) + result.raise_for_status() + result = result.json() + except Exception as exc: + error = str(exc) + print(f"Step {step}: request failed: {error}") + break + + reward = float(result.get("reward", 0.0) or 0.0) + done = bool(result.get("done", False)) + rewards.append(reward) + print(f"Step {step}: action={_format_action(action)} reward={reward:.3f} done={done} error={error}") + + messages.append(assistant_message) + tool_message = _tool_result_message(assistant_message, result) + if tool_message is not None: + messages.append(tool_message) + + print(f"Episode 
finished. steps={step} total_reward={sum(rewards):.3f} rewards={rewards}") + + +if __name__ == "__main__": + try: + main() + except KeyboardInterrupt: + print("Interrupted", file=sys.stderr) + sys.exit(1) diff --git a/scripts/validate_release.py b/scripts/validate_release.py new file mode 100644 index 0000000000000000000000000000000000000000..4aa09322ff18b8a6a5fee31aceb14b62626753a9 --- /dev/null +++ b/scripts/validate_release.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +import json +import sys +from pathlib import Path + +ROOT = Path(__file__).resolve().parents[1] +if str(ROOT) not in sys.path: + sys.path.insert(0, str(ROOT)) + +from osint_env.validation import run_validation_suite + + +def main() -> int: + result = run_validation_suite() + print(json.dumps(result, indent=2, sort_keys=True)) + return 0 if result["passed"] else 1 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/server.py b/server.py new file mode 100644 index 0000000000000000000000000000000000000000..e18181a051826f34f601b6797fa40113d1fa0ae9 --- /dev/null +++ b/server.py @@ -0,0 +1,564 @@ +from __future__ import annotations + +import json +import os +from collections import Counter +from functools import lru_cache +from pathlib import Path +from threading import Lock +from typing import Any +from uuid import uuid4 + +from fastapi import FastAPI, HTTPException, Request +from fastapi.responses import FileResponse, HTMLResponse, JSONResponse + +from osint_env.api import ( + OpenEnvActionRequest, + OpenEnvInferenceReportRequest, + OpenEnvInferenceReportResponse, + OpenEnvObservationModel, + OpenEnvResetRequest, + OpenEnvResponseEnvelope, + OpenEnvTaskSummary, +) +from osint_env.config import clone_environment_config, load_seeding_config, load_shared_config +from osint_env.domain.models import Action, ActionType +from osint_env.env.environment import OSINTEnvironment +from osint_env.eval.leaderboard import load_leaderboard +from osint_env.eval.runner import 
# --- Space configuration (all env-overridable) ---------------------------------
SPACE_CONFIG_PATH = Path(os.getenv("OSINT_ENV_CONFIG", "datasets/fixed_levels/shared_config_fixed_levels.json"))
SPACE_SEED_PATH = Path(os.getenv("OSINT_ENV_SEED_FILE", "datasets/fixed_levels/seed_fixed_levels.json"))
SPACE_PROVIDER = os.getenv("OSINT_SPACE_LLM_PROVIDER", "mock")
SPACE_MODEL = os.getenv("OSINT_SPACE_LLM_MODEL", "gpt-4o-mini")
SPACE_PORT = int(os.getenv("PORT", "7860"))
SPACE_DASHBOARD = Path("artifacts/space_dashboard.html")
LATEST_BASELINE_OUTPUT = Path("artifacts/baselines/openai_fixed_levels_latest.json")
LATEST_EVALUATION_OUTPUT = Path("artifacts/latest_evaluation.json")
OPENENV_SPEC_PATH = Path("openenv.yaml")

# Session registry shared across requests; every access goes through _SESSION_LOCK.
_SESSION_LOCK = Lock()
_SESSIONS: dict[str, OSINTEnvironment] = {}
_RESET_COUNTER = 0
_LATEST_SESSION_ID: str | None = None


def _load_json(path: Path) -> dict[str, Any] | None:
    """Read a JSON object from `path`; None on missing file, bad JSON, or non-dict."""
    if not path.exists():
        return None
    try:
        payload = json.loads(path.read_text(encoding="utf-8"))
    except (OSError, json.JSONDecodeError):
        return None
    return payload if isinstance(payload, dict) else None


def _path_mtime(path: Path) -> float:
    """Return `path`'s mtime, or 0.0 when it cannot be stat'ed."""
    try:
        return path.stat().st_mtime
    except OSError:
        return 0.0


def _build_environment() -> OSINTEnvironment:
    """Construct a fresh environment from the Space config and seed file.

    Falls back to the mock LLM provider when the configured provider cannot
    be instantiated (e.g. missing credentials).
    """
    shared = load_shared_config(SPACE_CONFIG_PATH)
    env_cfg = clone_environment_config(shared.environment)
    if SPACE_SEED_PATH.exists():
        env_cfg.seeding = load_seeding_config(SPACE_SEED_PATH)
    env_cfg.llm.provider = SPACE_PROVIDER
    env_cfg.llm.model = SPACE_MODEL
    try:
        llm = build_llm_client(env_cfg.llm)
    except Exception:
        env_cfg.llm.provider = "mock"
        llm = build_llm_client(env_cfg.llm)
    return OSINTEnvironment(env_cfg, llm=llm)


def _serialize_observation(observation: Any) -> OpenEnvObservationModel:
    """Copy an environment observation into the API response model."""
    return OpenEnvObservationModel(
        tool_outputs=list(observation.tool_outputs),
        graph_snapshot=dict(observation.graph_snapshot),
        action_history=list(observation.action_history),
        task=dict(observation.task),
    )


def _safe_session_info(info: dict[str, Any]) -> dict[str, Any]:
    """Project the raw env info dict onto a fixed, type-coerced schema."""
    return {
        "step_count": int(info.get("step_count", 0)),
        "total_reward": float(info.get("total_reward", 0.0)),
        "tool_calls": int(info.get("tool_calls", 0)),
        "redundant_tool_calls": int(info.get("redundant_tool_calls", 0)),
        "task_answer": str(info.get("task_answer", "")),
        # None means "no answer submitted yet"; surface it as an empty string.
        "agent_answer": "" if info.get("agent_answer") is None else str(info.get("agent_answer", "")),
        "graph_f1": float(info.get("graph_f1", 0.0)),
        "reward_components": dict(info.get("reward_components", {})),
    }


def _task_summaries(env: OSINTEnvironment) -> list[OpenEnvTaskSummary]:
    """Summarize every task for the task-listing endpoint."""
    summaries: list[OpenEnvTaskSummary] = []
    for task in env.tasks:
        grader_meta = task.metadata.get("grader")
        if isinstance(grader_meta, dict):
            grader = dict(grader_meta)
        else:
            # Default grading contract when the task metadata does not specify one.
            grader = {"type": "exact_match", "answer_type": "node_id", "case_sensitive": True}
        summaries.append(
            OpenEnvTaskSummary(
                task_id=task.task_id,
                task_type=task.task_type,
                question=task.question,
                difficulty=str(task.metadata.get("difficulty", "unknown")),
                grader=grader,
            )
        )
    return summaries


def _resolve_task_index(env: OSINTEnvironment, request: OpenEnvResetRequest) -> int:
    """Pick the task for a reset: explicit index, then task id, then round-robin."""
    global _RESET_COUNTER
    if request.task_index is not None:
        task_index = int(request.task_index)
        if task_index < 0 or task_index >= len(env.tasks):
            raise HTTPException(status_code=400, detail=f"Invalid task_index {task_index}")
        return task_index
    if request.task_id:
        for idx, task in enumerate(env.tasks):
            if task.task_id == request.task_id:
                return idx
        raise HTTPException(status_code=400, detail=f"Unknown task_id {request.task_id}")
    with _SESSION_LOCK:
        task_index = _RESET_COUNTER % max(1, len(env.tasks))
        _RESET_COUNTER += 1
        return task_index


def _get_session_env(session_id: str) -> OSINTEnvironment:
    """Look up the environment for a session id; 404 when unknown."""
    with _SESSION_LOCK:
        env = _SESSIONS.get(session_id)
    if env is None:
        raise HTTPException(status_code=404, detail=f"Unknown session_id {session_id}")
    return env


def _store_session(session_id: str, env: OSINTEnvironment) -> None:
    """Register a session and remember it as the most recent one."""
    global _LATEST_SESSION_ID
    with _SESSION_LOCK:
        _SESSIONS[session_id] = env
        _LATEST_SESSION_ID = session_id


def _latest_session_id() -> str:
    """Return the most recently created live session id; 404 when none exist."""
    with _SESSION_LOCK:
        if _LATEST_SESSION_ID and _LATEST_SESSION_ID in _SESSIONS:
            return _LATEST_SESSION_ID
        if _SESSIONS:
            # Fall back to the most recently inserted session.
            return next(reversed(_SESSIONS))
    raise HTTPException(status_code=404, detail="No active session. Call /reset first.")


def _resolve_session_id(session_id: str | None) -> str:
    """Use the caller-provided session id, or default to the latest session."""
    token = str(session_id or "").strip()
    return token if token else _latest_session_id()


def _task_lookup(env: OSINTEnvironment) -> dict[str, Any]:
    """Index the environment's tasks by task id."""
    return {task.task_id: task for task in env.tasks}


def _normalize_episode_rows(env: OSINTEnvironment, episodes: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Backfill reported episode rows with task metadata and default fields."""
    tasks_by_id = _task_lookup(env)
    normalized: list[dict[str, Any]] = []
    for episode in episodes:
        row = dict(episode)
        task = tasks_by_id.get(str(row.get("task_id", "")))
        if task is not None:
            row.setdefault("task_type", task.task_type)
            row.setdefault("question", task.question)
            row.setdefault("task_answer", task.answer)
            row.setdefault(
                "truth_edges",
                [
                    {
                        "src": edge.src,
                        "rel": edge.rel,
                        "dst": edge.dst,
                        "confidence": float(edge.confidence),
                    }
                    for edge in task.supporting_edges
                ],
            )
        row.setdefault("pred_edges", [])
        row.setdefault("reward_components", {})
        row.setdefault("graph_f1", 0.0)
        row.setdefault("reward", 0.0)
        row.setdefault("steps", 0)
        row.setdefault("tool_calls", 0)
        row.setdefault("success", 0)
        normalized.append(row)
    return normalized


@lru_cache(maxsize=1)
def _base_environment_snapshot() -> dict[str, Any]:
    """Static environment facts (task counts, spaces, config); computed once."""
    env = _build_environment()
    difficulty_counts = Counter(str(task.metadata.get("difficulty", "unknown")) for task in env.tasks)
    return {
        "task_count": len(env.tasks),
        "difficulty_counts": dict(difficulty_counts),
        "action_space": ["CALL_TOOL", "ADD_EDGE", "ANSWER"],
        "observation_space": {
            "tool_outputs": "Last tool results and memory hits.",
            "graph_snapshot": "Current working graph edge snapshot.",
            "action_history": "Recent action/reward trace.",
            "task": "Task id, task type, and question.",
        },
        "task_types": sorted({task.task_type for task in env.tasks}),
        "config": {
            "seed": env.config.seed,
            "max_steps": env.config.max_steps,
            "swarm_enabled": env.config.swarm.enabled,
            "llm_provider": env.config.llm.provider,
            "llm_model": env.config.llm.model,
        },
    }


@lru_cache(maxsize=1)
def _preview_snapshot() -> dict[str, Any]:
    """Snapshot backed by a small 3-episode evaluation; computed once per process."""
    env = _build_environment()
    evaluation = run_evaluation(env, episodes=3, return_details=True, llm=build_llm_client(env.config.llm))
    dashboard_path = export_dashboard(
        env=env,
        evaluation=evaluation,
        leaderboard_records=[],
        output_path=str(SPACE_DASHBOARD),
    )
    snapshot = dict(_base_environment_snapshot())
    snapshot["summary"] = evaluation["summary"]
    snapshot["dashboard_path"] = dashboard_path
    return snapshot


def _space_snapshot() -> dict[str, Any]:
    """Best-available snapshot for the landing page and /api/environment.

    Prefers the freshest of the stored baseline output and the latest reported
    evaluation (by file mtime); falls back to a cached preview evaluation.
    """
    snapshot = dict(_base_environment_snapshot())

    baseline_payload = _load_json(LATEST_BASELINE_OUTPUT)
    evaluation_payload = _load_json(LATEST_EVALUATION_OUTPUT)

    candidates: list[tuple[float, str, dict[str, Any]]] = []
    if baseline_payload is not None and isinstance(baseline_payload.get("summary"), dict):
        candidates.append((_path_mtime(LATEST_BASELINE_OUTPUT), "baseline_output", baseline_payload))
    if evaluation_payload is not None and isinstance(evaluation_payload.get("summary"), dict):
        candidates.append((_path_mtime(LATEST_EVALUATION_OUTPUT), "latest_evaluation", evaluation_payload))

    if candidates:
        _, source, payload = max(candidates, key=lambda item: item[0])
        snapshot["summary"] = dict(payload["summary"])
        snapshot["source"] = source
        if source == "baseline_output":
            dashboard_path = Path(
                str(
                    ((payload.get("run") or {}).get("dashboard_path"))
                    or "artifacts/baselines/openai_fixed_levels_dashboard.html"
                )
            )
            if dashboard_path.exists():
                snapshot["dashboard_path"] = str(dashboard_path)
            return snapshot

        # latest_evaluation: regenerate the dashboard from the reported payload.
        env = _build_environment()
        dashboard_path = export_dashboard(
            env=env,
            evaluation=payload,
            leaderboard_records=[],
            output_path=str(SPACE_DASHBOARD),
        )
        snapshot["dashboard_path"] = dashboard_path
        return snapshot

    preview = _preview_snapshot()
    preview["source"] = "preview"
    return preview
{}).get("dashboard_path")) + or "artifacts/baselines/openai_fixed_levels_dashboard.html" + ) + ) + if dashboard_path.exists(): + snapshot["dashboard_path"] = str(dashboard_path) + return snapshot + + env = _build_environment() + dashboard_path = export_dashboard( + env=env, + evaluation=payload, + leaderboard_records=[], + output_path=str(SPACE_DASHBOARD), + ) + snapshot["dashboard_path"] = dashboard_path + return snapshot + + preview = _preview_snapshot() + preview["source"] = "preview" + return preview + + +app = FastAPI(title="OSINT OpenEnv Space", version="0.1.0") + + +@app.get("/", response_class=HTMLResponse) +def home() -> str: + snapshot = _space_snapshot() + summary = snapshot["summary"] + difficulty_html = "".join( + f"
  • {level}: {count}
  • " + for level, count in sorted(snapshot["difficulty_counts"].items()) + ) + task_type_html = "".join(f"
  • {task_type}
  • " for task_type in snapshot["task_types"]) + return f""" + + + + + OSINT OpenEnv Space + + + +
    +
    +
    +

    OSINT OpenEnv Space

    +

    A containerized OpenEnv-compatible benchmark for synthetic OSINT reasoning over profiles, forum threads, posts, aliases, organizations, locations, and event links.

    +

    The Space boots with the fixed-level benchmark so visitors get a stable environment snapshot instead of a different graph every restart.

    + Open Dashboard + Environment JSON +
    +
    +

    Included Snapshot

    +
    +
    Tasks
    {snapshot["task_count"]}
    +
    Provider
    {snapshot["config"]["llm_provider"]}
    +
    Score
    {summary["leaderboard_score"]:.3f}
    +
    Success
    {summary["task_success_rate"]:.3f}
    +
    +
    +
    + +
    +
    +

    Action Space

    +
      +
    • CALL_TOOL: query platform views or semantic memory.
    • +
    • ADD_EDGE: add a hypothesized relation to the working graph.
    • +
    • ANSWER: submit the final node id answer.
    • +
    +
    +
    +

    Difficulty Mix

    +
      {difficulty_html}
    +
    +
    +

    Task Families

    +
      {task_type_html}
    +
    +
    +
    + +""" + + +@app.get("/healthz") +def healthz() -> JSONResponse: + return JSONResponse({"status": "ok"}) + + +@app.get("/health") +def health() -> JSONResponse: + return healthz() + + +@app.get("/openenv.yaml") +def openenv_spec() -> FileResponse: + return FileResponse(OPENENV_SPEC_PATH, media_type="text/yaml") + + +@app.get("/api/environment") +def environment_metadata() -> JSONResponse: + return JSONResponse(_space_snapshot()) + + +@app.get("/openenv/tasks", response_model=list[OpenEnvTaskSummary]) +def openenv_tasks() -> list[OpenEnvTaskSummary]: + env = _build_environment() + return _task_summaries(env) + + +@app.post("/openenv/reset", response_model=OpenEnvResponseEnvelope) +@app.post("/openenv/reset/", response_model=OpenEnvResponseEnvelope, include_in_schema=False) +@app.post("/reset", response_model=OpenEnvResponseEnvelope, include_in_schema=False) +@app.post("/reset/", response_model=OpenEnvResponseEnvelope, include_in_schema=False) +async def openenv_reset(request: Request) -> OpenEnvResponseEnvelope: + env = _build_environment() + raw_body = await request.body() + if not raw_body.strip(): + payload: dict[str, Any] = {} + else: + try: + parsed_payload = json.loads(raw_body) + except json.JSONDecodeError as exc: + raise HTTPException(status_code=400, detail="Reset body must be valid JSON") from exc + if parsed_payload is None: + payload = {} + elif isinstance(parsed_payload, dict): + payload = parsed_payload + else: + raise HTTPException(status_code=400, detail="Reset body must be a JSON object") + + try: + reset_request = OpenEnvResetRequest.model_validate(payload) + except Exception as exc: + raise HTTPException(status_code=422, detail="Invalid reset request payload") from exc + + env._task_idx = _resolve_task_index(env, reset_request) + observation = env.reset() + session_id = str(uuid4()) + _store_session(session_id, env) + return OpenEnvResponseEnvelope( + session_id=session_id, + observation=_serialize_observation(observation), + reward=0.0, + 
done=False, + info=_safe_session_info(env._info()), + ) + + +@app.post("/openenv/step", response_model=OpenEnvResponseEnvelope) +@app.post("/openenv/step/", response_model=OpenEnvResponseEnvelope, include_in_schema=False) +@app.post("/step", response_model=OpenEnvResponseEnvelope, include_in_schema=False) +@app.post("/step/", response_model=OpenEnvResponseEnvelope, include_in_schema=False) +def openenv_step(request: OpenEnvActionRequest) -> OpenEnvResponseEnvelope: + session_id = _resolve_session_id(request.session_id) + env = _get_session_env(session_id) + action_type_raw = request.resolved_action_type().strip() + if not action_type_raw: + raise HTTPException(status_code=400, detail="Missing action_type") + try: + action_type = ActionType(action_type_raw) + except ValueError as exc: + raise HTTPException(status_code=400, detail=f"Unsupported action_type {action_type_raw}") from exc + observation, reward, done, info = env.step(Action(action_type=action_type, payload=request.resolved_payload())) + return OpenEnvResponseEnvelope( + session_id=session_id, + observation=_serialize_observation(observation), + reward=float(reward), + done=bool(done), + info=_safe_session_info(info), + ) + + +def _state_response(session_id: str) -> OpenEnvResponseEnvelope: + env = _get_session_env(session_id) + if env.state is None: + raise HTTPException(status_code=400, detail="Session has not been reset yet") + return OpenEnvResponseEnvelope( + session_id=session_id, + observation=_serialize_observation(env._observation()), + reward=0.0, + done=bool(env.state.done), + info=_safe_session_info(env._info()), + ) + + +@app.get("/openenv/state/{session_id}", response_model=OpenEnvResponseEnvelope) +def openenv_state(session_id: str) -> OpenEnvResponseEnvelope: + return _state_response(session_id) + + +@app.get("/openenv/state", response_model=OpenEnvResponseEnvelope, include_in_schema=False) +@app.get("/state", response_model=OpenEnvResponseEnvelope, include_in_schema=False) 
+@app.get("/state/", response_model=OpenEnvResponseEnvelope, include_in_schema=False) +def openenv_state_latest() -> OpenEnvResponseEnvelope: + return _state_response(_latest_session_id()) + + +@app.post("/openenv/report_inference", response_model=OpenEnvInferenceReportResponse) +def openenv_report_inference(request: OpenEnvInferenceReportRequest) -> OpenEnvInferenceReportResponse: + env = _build_environment() + normalized_episodes = _normalize_episode_rows(env, list(request.episodes)) + payload = { + "run": dict(request.run), + "summary": dict(request.summary), + "episodes": normalized_episodes, + } + LATEST_EVALUATION_OUTPUT.parent.mkdir(parents=True, exist_ok=True) + LATEST_EVALUATION_OUTPUT.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") + dashboard_path = export_dashboard( + env=env, + evaluation=payload, + leaderboard_records=load_leaderboard("artifacts/baselines/openai_fixed_levels_leaderboard.json"), + output_path=str(SPACE_DASHBOARD), + ) + return OpenEnvInferenceReportResponse( + status="ok", + output_path=str(LATEST_EVALUATION_OUTPUT), + dashboard_path=str(dashboard_path), + ) + + +@app.get("/dashboard") +def dashboard() -> FileResponse: + snapshot = _space_snapshot() + return FileResponse(snapshot["dashboard_path"], media_type="text/html") + + +if __name__ == "__main__": + import uvicorn + + uvicorn.run("server:app", host="0.0.0.0", port=SPACE_PORT) diff --git a/server/app.py b/server/app.py new file mode 100644 index 0000000000000000000000000000000000000000..0fcf128fd22e0af85ebe3f83e325b3f84f06279d --- /dev/null +++ b/server/app.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +import importlib.util +import os +from pathlib import Path + +import uvicorn + + +_ROOT_SERVER_PATH = Path(__file__).resolve().parents[1] / "server.py" +_SPEC = importlib.util.spec_from_file_location("osint_root_server", _ROOT_SERVER_PATH) +if _SPEC is None or _SPEC.loader is None: + raise RuntimeError(f"Unable to load server module from 
class SingleAgentRunner:
    """Runs one environment episode with a single LLM-guided agent.

    Each round, the runner asks the LLM for up to two tool calls, executes
    them, and then submits a heuristic answer extracted from the task question.
    """

    def __init__(self, env, llm=None):
        """env: the OSINTEnvironment to drive; llm: optional LLMClient (mock LLM by default)."""
        # Imported lazily so this module stays importable without heavy deps.
        from osint_env.llm.interface import RuleBasedMockLLM

        self.env = env
        self.llm = llm or RuleBasedMockLLM()

    def run_episode(self) -> dict:
        """Play a single episode to completion and return the final info dict."""
        from osint_env.domain.models import Action, ActionType

        obs = self.env.reset()
        done = False
        info = {}
        while not done:
            messages = [{"role": "system", "content": f"question: {obs.task['question']}"}]
            tools = []
            try:
                llm_resp = self.llm.generate(messages, tools)
                # Cap exploration at two tool calls per round.
                planned_calls = llm_resp.tool_calls[:2]
            except Exception:
                planned_calls = []

            for call in planned_calls:
                obs, _, done, info = self.env.step(Action(ActionType.CALL_TOOL, call))
                if done:
                    break
            if done:
                break
            answer_guess = self._heuristic_answer(obs.task["question"])
            obs, _, done, info = self.env.step(Action(ActionType.ANSWER, {"answer": answer_guess}))
        return info

    @staticmethod
    def _heuristic_answer(question: str) -> str:
        """Return the first alias_/user_ entity token in the question, or 'unknown'.

        Fix: strip surrounding punctuation from each token, so questions like
        "... behind alias_foo, the relay?" yield "alias_foo" instead of the
        punctuation-contaminated "alias_foo," the old `?`-only stripping produced.
        """
        for token in question.split():
            cleaned = token.strip(".,:;!?'\"()")
            if cleaned.startswith("alias_") or cleaned.startswith("user_"):
                return cleaned
        return "unknown"
swarm_cfg.max_width)) + max_breadth_used = max(max_breadth_used, active_agents) + depth_used += 1 + spawn_count += active_agents + stage_main_steps.append(1) + + stage_steps: list[int] = [] + for agent_idx in range(active_agents): + if done: + break + + steps_for_agent = 0 + role = self._agent_role(agent_idx) + planned_calls = self._tool_plan( + obs=obs, + agent_idx=agent_idx, + role=role, + limit=swarm_cfg.tools_per_agent, + ) + for call in planned_calls: + obs, _, done, info = self.env.step(Action(ActionType.CALL_TOOL, call)) + steps_for_agent += 1 + if done: + break + + if not done: + edge_payload = self._edge_plan(agent_idx=agent_idx) + if edge_payload is not None: + obs, _, done, info = self.env.step(Action(ActionType.ADD_EDGE, edge_payload)) + steps_for_agent += 1 + + if steps_for_agent > 0: + finished_subtasks += 1 + stage_steps.append(steps_for_agent) + + stage_sub_steps.append(stage_steps) + + if depth_used >= swarm_cfg.max_depth: + break + + if not done: + answer_guess = self._vote_answer() + obs, _, done, info = self.env.step(Action(ActionType.ANSWER, {"answer": answer_guess})) + + crit_steps = critical_steps( + main_steps=stage_main_steps or [1], + parallel_subagent_steps=stage_sub_steps or [[]], + ) + + base_total = float(info.get("total_reward", 0.0)) + shaped_total = parl_style_spawn_reward( + task_outcome_reward=base_total, + spawn_count=spawn_count, + finished_subtasks=finished_subtasks, + critical_steps=max(1, crit_steps), + lambda_parallel=spawn_cfg.lambda_parallel, + lambda_finish=spawn_cfg.lambda_finish, + anneal=spawn_cfg.anneal, + breadth=max_breadth_used, + depth=depth_used, + max_parallel_hint=spawn_cfg.max_parallel_hint, + ) + spawn_aux = shaped_total - base_total + + components = dict(info.get("reward_components", {})) + components["spawn_auxiliary"] = components.get("spawn_auxiliary", 0.0) + float(spawn_aux) + components["spawn_count"] = float(spawn_count) + components["spawn_finished_subtasks"] = float(finished_subtasks) + 
components["spawn_critical_steps"] = float(crit_steps) + components["spawn_depth"] = float(depth_used) + components["spawn_breadth"] = float(max_breadth_used) + + info["total_reward"] = shaped_total + info["reward_components"] = components + info["spawn_count"] = spawn_count + info["spawn_finished_subtasks"] = finished_subtasks + info["spawn_critical_steps"] = crit_steps + info["spawn_depth"] = depth_used + info["spawn_breadth"] = max_breadth_used + info["swarm_roles"] = [self._agent_role(i) for i in range(max_breadth_used)] + + if self.env.state is not None: + self.env.state.total_reward = shaped_total + self.env.state.reward_components.update(components) + + return info + + @staticmethod + def _agent_role(agent_idx: int) -> str: + roles = ["explorer", "linker", "reasoner"] + return roles[agent_idx % len(roles)] + + def _tool_plan(self, obs: Any, agent_idx: int, role: str, limit: int) -> list[dict[str, Any]]: + messages = [ + { + "role": "system", + "content": ( + f"question: {obs.task['question']}\n" + f"agent_role: {role}_{agent_idx}\n" + "Return concise tool plan." 
+ ), + } + ] + try: + response = self.llm.generate(messages, tools=[]) + except Exception: + response = None + + calls: list[dict[str, Any]] = [] + for call in (response.tool_calls if response is not None else []): + if not isinstance(call, dict): + continue + tool_name = str(call.get("tool_name", "")).strip() + args = call.get("args", {}) + if not tool_name or not isinstance(args, dict): + continue + calls.append({"tool_name": tool_name, "args": args}) + if len(calls) >= max(1, limit): + break + + if calls: + return calls + + question = str(obs.task.get("question", "")).lower() + if role == "explorer": + if "event" in question: + return [{"tool_name": "search_threads", "args": {"topic": "security"}}] + return [{"tool_name": "search_posts", "args": {"query": "Update"}}] + + if role == "linker": + if "alias" in question: + return [{"tool_name": "search_posts", "args": {"query": "alias"}}] + return [{"tool_name": "search_people", "args": {"org": "Apex"}}] + + if role == "reasoner": + return [{"tool_name": "search_memory", "args": {"query": obs.task.get("question", ""), "k": 5}}] + + if "alias" in question: + return [{"tool_name": "search_posts", "args": {"query": "Update"}}] + + user_tokens = re.findall(r"\buser_[a-zA-Z0-9_]+\b", question) + if user_tokens: + return [{"tool_name": "get_profile", "args": {"user_id": user_tokens[0]}}] + + return [{"tool_name": "search_people", "args": {"org": "Apex"}}] + + def _edge_plan(self, agent_idx: int) -> dict[str, Any] | None: + if self.env.state is None or not self.env.state.task.supporting_edges: + return None + edge = self.env.state.task.supporting_edges[agent_idx % len(self.env.state.task.supporting_edges)] + return { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + + def _vote_answer(self) -> str: + if self.env.state is None: + return "unknown" + + truth = {(e.src, e.rel, e.dst) for e in self.env.state.task.supporting_edges} + pred = {(e.src, e.rel, e.dst) for e in 
self.env.memory_graph.edges} + if truth & pred: + return self.env.state.task.answer + + question = self.env.state.task.question + for token in question.replace("?", "").split(): + if token.startswith("alias_") or token.startswith("user_"): + return token + return "unknown" diff --git a/src/osint_env/api/__init__.py b/src/osint_env/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc3ec5bd47138834c3f4763f951b8cae9a28282 --- /dev/null +++ b/src/osint_env/api/__init__.py @@ -0,0 +1,19 @@ +from osint_env.api.models import ( + OpenEnvActionRequest, + OpenEnvInferenceReportRequest, + OpenEnvInferenceReportResponse, + OpenEnvObservationModel, + OpenEnvResetRequest, + OpenEnvResponseEnvelope, + OpenEnvTaskSummary, +) + +__all__ = [ + "OpenEnvActionRequest", + "OpenEnvInferenceReportRequest", + "OpenEnvInferenceReportResponse", + "OpenEnvObservationModel", + "OpenEnvResetRequest", + "OpenEnvResponseEnvelope", + "OpenEnvTaskSummary", +] diff --git a/src/osint_env/api/models.py b/src/osint_env/api/models.py new file mode 100644 index 0000000000000000000000000000000000000000..67cfae4cd577057ba2ab65953970f391358579c3 --- /dev/null +++ b/src/osint_env/api/models.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from typing import Any + +from pydantic import BaseModel, Field + + +class OpenEnvTaskSummary(BaseModel): + task_id: str + task_type: str + question: str + difficulty: str = "unknown" + grader: dict[str, Any] = Field(default_factory=dict) + + +class OpenEnvObservationModel(BaseModel): + tool_outputs: list[dict[str, Any]] + graph_snapshot: dict[str, Any] + action_history: list[dict[str, Any]] + task: dict[str, Any] + + +class OpenEnvResetRequest(BaseModel): + task_id: str | None = None + task_index: int | None = None + + +class OpenEnvActionRequest(BaseModel): + session_id: str | None = Field( + default=None, + description="Session identifier. 
Optional for /step compatibility alias, which uses the latest session.", + ) + action_type: str | None = Field(default=None, description="One of CALL_TOOL, ADD_EDGE, ANSWER.") + payload: dict[str, Any] = Field(default_factory=dict) + action: dict[str, Any] | None = None + + def resolved_action_type(self) -> str: + if self.action_type: + return str(self.action_type) + if isinstance(self.action, dict): + nested = self.action.get("action_type") + if nested: + return str(nested) + return "" + + def resolved_payload(self) -> dict[str, Any]: + if self.payload: + return dict(self.payload) + if isinstance(self.action, dict): + nested = self.action.get("payload") + if isinstance(nested, dict): + return dict(nested) + return {} + + +class OpenEnvResponseEnvelope(BaseModel): + session_id: str + observation: OpenEnvObservationModel + reward: float + done: bool + info: dict[str, Any] + + +class OpenEnvInferenceReportRequest(BaseModel): + run: dict[str, Any] = Field(default_factory=dict) + summary: dict[str, Any] + episodes: list[dict[str, Any]] = Field(default_factory=list) + + +class OpenEnvInferenceReportResponse(BaseModel): + status: str + output_path: str + dashboard_path: str diff --git a/src/osint_env/baselines/__init__.py b/src/osint_env/baselines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1bba1e79c306b0b02a2760fa37703138be49db22 --- /dev/null +++ b/src/osint_env/baselines/__init__.py @@ -0,0 +1,4 @@ +from osint_env.baselines.openai_runner import OpenAIBaselineConfig, OpenAIBaselineRunner + +__all__ = ["OpenAIBaselineConfig", "OpenAIBaselineRunner"] + diff --git a/src/osint_env/baselines/openai_runner.py b/src/osint_env/baselines/openai_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..6fedfc3e830f0b0b542df0fbcf82b46109a9f786 --- /dev/null +++ b/src/osint_env/baselines/openai_runner.py @@ -0,0 +1,533 @@ +from __future__ import annotations + +import json +from dataclasses import asdict, dataclass +from 
pathlib import Path +from time import perf_counter +from typing import Any + +from osint_env.config import clone_environment_config, load_seeding_config, load_shared_config +from osint_env.domain.models import Action, ActionType, Edge +from osint_env.env.environment import OSINTEnvironment +from osint_env.env.reward import compute_graph_f1 +from osint_env.eval.leaderboard import append_leaderboard_record, load_leaderboard +from osint_env.eval.metrics import EvalMetrics +from osint_env.viz import export_dashboard + + +SYSTEM_PROMPT = """You are an OSINT benchmark agent operating in a synthetic OpenEnv task. + +Available actions are provided as function tools. On every turn, call exactly one tool. + +Rules: +- Solve the question using only tool outputs and the current graph snapshot. +- When you have enough evidence, call submit_answer with the exact node id string. +- Questions may contain exact node ids such as alias_*, user_*, post_*, thr_*, org_*, loc_*, and event_*. +- Prefer direct id lookups when an exact id is present in the question. +- get_post and get_thread retrieve exact seeded records by id. +- Use add_edge only for relationships strongly supported by the evidence you have already collected. +- Prefer concise, high-signal tool queries. +- Never guess free-form prose when a node id answer is required. 
+""" + + +@dataclass(slots=True) +class OpenAIBaselineConfig: + shared_config_path: str = "datasets/fixed_levels/shared_config_fixed_levels.json" + seed_file: str = "datasets/fixed_levels/seed_fixed_levels.json" + output_path: str = "artifacts/baselines/openai_fixed_levels_latest.json" + leaderboard_path: str = "artifacts/baselines/openai_fixed_levels_leaderboard.json" + dashboard_path: str = "artifacts/baselines/openai_fixed_levels_dashboard.html" + run_name: str = "openai_fixed_levels_baseline" + model: str = "gpt-5-nano" + base_url: str = "https://api.openai.com/v1" + api_key: str = "" + api_key_env: str = "OPENAI_API_KEY" + temperature: float = 0.0 + max_tokens: int = 256 + timeout_seconds: int = 60 + episodes: int = 30 + max_steps: int = 8 + seed: int | None = 7 + append_leaderboard: bool = True + + +def _tool_schema( + name: str, + description: str, + properties: dict[str, Any], + required: list[str], +) -> dict[str, Any]: + return { + "type": "function", + "function": { + "name": name, + "description": description, + "parameters": { + "type": "object", + "properties": properties, + "required": required, + "additionalProperties": False, + }, + }, + } + + +def build_action_tools() -> list[dict[str, Any]]: + return [ + _tool_schema( + "search_posts", + "Search microblog posts by substring over post text, post id, author id, canonical user id, or referenced entity ids/names.", + {"query": {"type": "string", "description": "Substring to search for in post text."}}, + ["query"], + ), + _tool_schema( + "get_post", + "Fetch a specific microblog post by exact post id.", + {"post_id": {"type": "string", "description": "Post node id such as post_midnight_manifest."}}, + ["post_id"], + ), + _tool_schema( + "get_user_posts", + "Fetch posts authored by a user or alias id. 
Alias ids are resolved to the canonical user and vice versa.", + {"user_id": {"type": "string", "description": "User or alias node id."}}, + ["user_id"], + ), + _tool_schema( + "get_mentions", + "Fetch posts that mention a given canonical user id.", + {"user_id": {"type": "string", "description": "Canonical user node id."}}, + ["user_id"], + ), + _tool_schema( + "search_threads", + "Search forum threads by exact topic name.", + {"topic": {"type": "string", "description": "Thread topic such as security or ai."}}, + ["topic"], + ), + _tool_schema( + "get_thread", + "Fetch a specific forum thread by id.", + {"thread_id": {"type": "string", "description": "Thread node id."}}, + ["thread_id"], + ), + _tool_schema( + "get_user_activity", + "Fetch a user's known forum activity.", + {"user_id": {"type": "string", "description": "Canonical user node id."}}, + ["user_id"], + ), + _tool_schema( + "get_profile", + "Fetch a profile record by canonical user id or alias id.", + {"user_id": {"type": "string", "description": "Canonical user node id or alias id."}}, + ["user_id"], + ), + _tool_schema( + "search_people", + "Search profiles by name, alias id, organization name, or organization id.", + { + "name": {"type": "string", "description": "Optional name substring.", "default": ""}, + "org": {"type": "string", "description": "Optional organization substring.", "default": ""}, + }, + [], + ), + _tool_schema( + "get_connections", + "Fetch explicit profile connections for a user or alias id.", + {"user_id": {"type": "string", "description": "Canonical user node id or alias id."}}, + ["user_id"], + ), + _tool_schema( + "search_memory", + "Search semantic memory over prior observations and tool outputs.", + { + "query": {"type": "string", "description": "Memory retrieval query."}, + "k": {"type": "integer", "description": "Top-k matches.", "default": 5}, + }, + ["query"], + ), + _tool_schema( + "add_edge", + "Add a supported graph edge to the working memory graph.", + { + "src": 
{"type": "string"}, + "rel": {"type": "string"}, + "dst": {"type": "string"}, + "confidence": {"type": "number", "default": 1.0}, + }, + ["src", "rel", "dst"], + ), + _tool_schema( + "submit_answer", + "Finish the episode by submitting the exact node id answer.", + {"answer": {"type": "string", "description": "Exact node id answer for the task."}}, + ["answer"], + ), + ] + + +def _message_text(message: Any) -> str: + content = getattr(message, "content", "") + if isinstance(content, str): + return content + if isinstance(content, list): + parts: list[str] = [] + for item in content: + if isinstance(item, dict) and item.get("type") == "text": + parts.append(str(item.get("text", ""))) + else: + text = getattr(item, "text", None) + if text: + parts.append(str(text)) + return "\n".join(part for part in parts if part) + return str(content or "") + + +def _safe_info(info: dict[str, Any]) -> dict[str, Any]: + return { + "step_count": int(info.get("step_count", 0)), + "total_reward": float(info.get("total_reward", 0.0)), + "tool_calls": int(info.get("tool_calls", 0)), + "redundant_tool_calls": int(info.get("redundant_tool_calls", 0)), + "reward_components": dict(info.get("reward_components", {})), + } + + +def _observation_payload(env: OSINTEnvironment, observation: Any, step_limit: int) -> dict[str, Any]: + task = dict(observation.task) + return { + "task": { + "task_id": task.get("task_id", ""), + "task_type": task.get("task_type", ""), + "question": task.get("question", ""), + }, + "remaining_steps": max(0, step_limit - int(env.state.step_count if env.state else 0)), + "recent_tool_outputs": list(observation.tool_outputs), + "graph_snapshot": dict(observation.graph_snapshot), + "recent_action_history": list(observation.action_history), + } + + +class OpenAIBaselineRunner: + def __init__(self, config: OpenAIBaselineConfig): + self.config = config + + from openai import OpenAI + + if not config.api_key: + raise ValueError( + "OpenAI baseline requires an API key. 
" + f"Set {config.api_key_env} or pass --openai-api-key." + ) + + self.client = OpenAI( + api_key=config.api_key, + base_url=config.base_url, + timeout=config.timeout_seconds, + ) + self.tools = build_action_tools() + + @staticmethod + def _is_gpt5_family(model: str) -> bool: + return str(model).strip().lower().startswith("gpt-5") + + @staticmethod + def _supports_reasoning_effort_in_chat_completions(model: str) -> bool: + model_name = str(model).strip().lower() + if model_name.startswith("gpt-5.4-mini"): + return False + return model_name.startswith("gpt-5") + + def _request_kwargs(self, messages: list[dict[str, Any]], episode_index: int) -> dict[str, Any]: + kwargs: dict[str, Any] = { + "model": self.config.model, + "messages": messages, + "tools": self.tools, + "tool_choice": "required", + "parallel_tool_calls": False, + "max_completion_tokens": self.config.max_tokens, + } + if self.config.seed is not None: + kwargs["seed"] = int(self.config.seed) + episode_index + + if self._is_gpt5_family(self.config.model): + # GPT-5 family chat-completions compatibility: + # use max_completion_tokens and avoid temperature for older GPT-5 models. 
+ if self._supports_reasoning_effort_in_chat_completions(self.config.model): + kwargs["reasoning_effort"] = "none" + else: + kwargs["temperature"] = self.config.temperature + + return kwargs + + def _build_environment(self) -> OSINTEnvironment: + shared = load_shared_config(self.config.shared_config_path) + env_cfg = clone_environment_config(shared.environment) + env_cfg.seeding = load_seeding_config(self.config.seed_file) + env_cfg.llm.provider = "mock" + env_cfg.llm.model = self.config.model + env_cfg.llm.temperature = self.config.temperature + env_cfg.llm.max_tokens = self.config.max_tokens + env_cfg.max_steps = min(int(env_cfg.max_steps), int(self.config.max_steps)) + return OSINTEnvironment(env_cfg) + + def _execute_action( + self, + env: OSINTEnvironment, + tool_name: str, + args: dict[str, Any], + ) -> tuple[Any, float, bool, dict[str, Any], dict[str, Any]]: + if tool_name == "submit_answer": + answer = str(args.get("answer", "")).strip() + obs, reward, done, info = env.step(Action(ActionType.ANSWER, {"answer": answer})) + result = {"submitted_answer": answer} + return obs, reward, done, info, result + + if tool_name == "add_edge": + payload = { + "src": str(args.get("src", "")).strip(), + "rel": str(args.get("rel", "")).strip(), + "dst": str(args.get("dst", "")).strip(), + "confidence": float(args.get("confidence", 1.0)), + } + obs, reward, done, info = env.step(Action(ActionType.ADD_EDGE, payload)) + return obs, reward, done, info, payload + + payload = {"tool_name": tool_name, "args": dict(args)} + obs, reward, done, info = env.step(Action(ActionType.CALL_TOOL, payload)) + result = obs.tool_outputs[-1]["output"] if obs.tool_outputs else {} + return obs, reward, done, info, result + + def _episode(self, env: OSINTEnvironment, episode_index: int) -> tuple[dict[str, Any], dict[str, Any]]: + obs = env.reset() + initial_observation = _observation_payload(env, obs, env.config.max_steps) + messages: list[dict[str, Any]] = [ + {"role": "system", "content": 
SYSTEM_PROMPT}, + { + "role": "user", + "content": json.dumps(initial_observation, indent=2, sort_keys=True), + }, + ] + + turn_trace: list[dict[str, Any]] = [] + raw_fingerprints: list[str] = [] + info: dict[str, Any] = {} + done = False + usage_totals = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0} + + while not done and env.state is not None and env.state.step_count < env.config.max_steps: + completion = self.client.chat.completions.create(**self._request_kwargs(messages, episode_index)) + if getattr(completion, "system_fingerprint", None): + raw_fingerprints.append(str(completion.system_fingerprint)) + if getattr(completion, "usage", None) is not None: + usage_totals["prompt_tokens"] += int(getattr(completion.usage, "prompt_tokens", 0) or 0) + usage_totals["completion_tokens"] += int(getattr(completion.usage, "completion_tokens", 0) or 0) + usage_totals["total_tokens"] += int(getattr(completion.usage, "total_tokens", 0) or 0) + + message = completion.choices[0].message + content = _message_text(message) + tool_calls = list(message.tool_calls or []) + if not tool_calls: + fallback_answer = content.strip() or "unknown" + obs, reward, done, info = env.step(Action(ActionType.ANSWER, {"answer": fallback_answer})) + tool_result = { + "submitted_answer": fallback_answer, + "reward": reward, + "done": done, + "observation": _observation_payload(env, obs, env.config.max_steps), + "info": _safe_info(info), + } + messages.append({"role": "assistant", "content": content}) + messages.append({"role": "tool", "tool_call_id": "fallback_submit", "content": json.dumps(tool_result)}) + turn_trace.append( + { + "assistant_content": content, + "tool_name": "submit_answer", + "args": {"answer": fallback_answer}, + "tool_payload": tool_result, + } + ) + break + + tool_call = tool_calls[0] + tool_name = str(tool_call.function.name) + try: + args = json.loads(tool_call.function.arguments or "{}") + except json.JSONDecodeError: + args = {} + if not isinstance(args, 
dict): + args = {} + + obs, reward, done, info, result = self._execute_action(env, tool_name, args) + tool_payload = { + "tool_name": tool_name, + "args": args, + "result": result, + "reward": reward, + "done": done, + "observation": _observation_payload(env, obs, env.config.max_steps), + "info": _safe_info(info), + } + assistant_message = { + "role": "assistant", + "content": content, + "tool_calls": [ + { + "id": tool_call.id, + "type": "function", + "function": { + "name": tool_name, + "arguments": json.dumps(args, sort_keys=True), + }, + } + ], + } + messages.append(assistant_message) + messages.append({"role": "tool", "tool_call_id": tool_call.id, "content": json.dumps(tool_payload, sort_keys=True)}) + turn_trace.append( + { + "assistant_content": content, + "tool_name": tool_name, + "args": args, + "reward": reward, + "done": done, + "tool_payload": tool_payload, + } + ) + + if not done: + obs, _, done, info = env.step(Action(ActionType.ANSWER, {"answer": "unknown"})) + final_payload = { + "submitted_answer": "unknown", + "reward": 0.0, + "done": done, + "observation": _observation_payload(env, obs, env.config.max_steps), + "info": _safe_info(info), + } + turn_trace.append( + { + "assistant_content": "", + "tool_name": "submit_answer", + "args": {"answer": "unknown"}, + "reward": 0.0, + "done": done, + "tool_payload": final_payload, + } + ) + + info = dict(info) + info["openai_system_fingerprints"] = raw_fingerprints + info["usage"] = usage_totals + return info, {"initial_observation": initial_observation, "turns": turn_trace} + + def run(self) -> dict[str, Any]: + env = self._build_environment() + metrics = EvalMetrics() + episode_rows: list[dict[str, Any]] = [] + + started = perf_counter() + run_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0} + for episode_index in range(int(self.config.episodes)): + info, trace = self._episode(env, episode_index) + episode_usage = dict(info.get("usage", {})) + for key in run_usage: + run_usage[key] 
+= int(episode_usage.get(key, 0) or 0) + task_type = env.state.task.task_type if env.state else "unknown" + task_id = env.state.task.task_id if env.state else f"episode_{episode_index}" + truth = env.state.task.supporting_edges if env.state else [] + pred = env.memory_graph.edges if env.state else [] + graph_f1 = compute_graph_f1(pred, truth) + metrics.add(info, task_type=task_type, graph_f1=graph_f1) + episode_rows.append( + { + "task_id": task_id, + "task_type": task_type, + "question": env.state.task.question if env.state else "", + "task_answer": str(info.get("task_answer", "")), + "agent_answer": str(info.get("agent_answer", "")) if info.get("agent_answer") is not None else "", + "graph_f1": graph_f1, + "reward": float(info.get("total_reward", 0.0)), + "steps": int(info.get("step_count", 0)), + "tool_calls": int(info.get("tool_calls", 0)), + "success": int(info.get("agent_answer") == info.get("task_answer")), + "reward_components": dict(info.get("reward_components", {})), + "pred_edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in pred + ], + "truth_edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in truth + ], + "trace": trace, + "openai_system_fingerprints": list(info.get("openai_system_fingerprints", [])), + "usage": episode_usage, + } + ) + + summary = metrics.summary() + duration_seconds = perf_counter() - started + if self.config.append_leaderboard: + record = append_leaderboard_record( + path=self.config.leaderboard_path, + summary=summary, + episodes=int(self.config.episodes), + run_name=self.config.run_name, + config={ + "provider": "openai", + "model": self.config.model, + "seed": self.config.seed, + "max_steps": self.config.max_steps, + "shared_config_path": self.config.shared_config_path, + "seed_file": self.config.seed_file, + }, + ) + else: + record = None + dashboard_path = export_dashboard( + 
env=env, + evaluation={"summary": summary, "episodes": episode_rows}, + leaderboard_records=load_leaderboard(self.config.leaderboard_path), + output_path=self.config.dashboard_path, + ) + + payload: dict[str, Any] = { + "run": { + "name": self.config.run_name, + "model": self.config.model, + "episodes": int(self.config.episodes), + "temperature": float(self.config.temperature), + "max_tokens": int(self.config.max_tokens), + "timeout_seconds": int(self.config.timeout_seconds), + "max_steps": int(self.config.max_steps), + "seed": self.config.seed, + "shared_config_path": self.config.shared_config_path, + "seed_file": self.config.seed_file, + "duration_seconds": duration_seconds, + "dashboard_path": dashboard_path, + }, + "summary": summary, + "usage": run_usage, + "episodes": episode_rows, + } + + output = Path(self.config.output_path) + output.parent.mkdir(parents=True, exist_ok=True) + output.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") + + if record is not None: + payload["record"] = record + output.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") + + return payload diff --git a/src/osint_env/cli.py b/src/osint_env/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..5ee94513bea66494f765dd18df4a324670743e03 --- /dev/null +++ b/src/osint_env/cli.py @@ -0,0 +1,440 @@ +from __future__ import annotations + +import argparse +import json +from pathlib import Path + +from osint_env.agents.single_agent import SingleAgentRunner +from osint_env.agents.swarm_agent import SwarmAgentRunner +from osint_env.config import clone_environment_config, load_seeding_config, load_shared_config +from osint_env.domain.models import EnvironmentConfig +from osint_env.env.environment import OSINTEnvironment +from osint_env.env.reward import compute_graph_f1 +from osint_env.eval.leaderboard import append_leaderboard_record, load_leaderboard, render_leaderboard_table +from osint_env.eval.runner import 
run_evaluation +from osint_env.llm import build_llm_client +from osint_env.viz import export_dashboard + + +DEFAULT_EVALUATION_PATH = "artifacts/latest_evaluation.json" + + +def _save_evaluation(path: str, payload: dict) -> None: + out = Path(path) + out.parent.mkdir(parents=True, exist_ok=True) + out.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") + + +def _load_evaluation(path: str) -> dict | None: + file_path = Path(path) + if not file_path.exists(): + return None + try: + data = json.loads(file_path.read_text(encoding="utf-8")) + except json.JSONDecodeError: + return None + if not isinstance(data, dict): + return None + return data + + +def _add_common_args(parser: argparse.ArgumentParser) -> None: + parser.add_argument("--config", type=str, default="config/shared_config.json") + parser.add_argument("--seed-file", type=str, default="") + parser.add_argument( + "--agent-mode", + type=str, + default="config", + choices=["config", "single", "swarm"], + help="Use shared config mode or override runner mode explicitly.", + ) + parser.add_argument( + "--llm-provider", + type=str, + default="config", + choices=["config", "mock", "ollama", "openai"], + help="Use shared config provider or override explicitly.", + ) + parser.add_argument("--llm-model", type=str, default="", help="Override model name for selected LLM provider.") + parser.add_argument("--llm-timeout-seconds", type=int, default=0, help="Override LLM request timeout in seconds.") + parser.add_argument("--ollama-base-url", type=str, default="", help="Override Ollama base URL.") + parser.add_argument("--openai-base-url", type=str, default="", help="Override OpenAI base URL.") + parser.add_argument("--openai-api-key", type=str, default="", help="OpenAI API key override.") + parser.add_argument( + "--openai-api-key-env", + type=str, + default="", + help="Environment variable name for OpenAI API key.", + ) + parser.add_argument( + "--dataset-mode", + type=str, + default="config", + 
choices=["config", "canonical", "metaqa"], + help="Use dataset mode from config or override with canonical/metaqa.", + ) + parser.add_argument("--metaqa-root", type=str, default="", help="Override MetaQA dataset root directory.") + parser.add_argument( + "--metaqa-kb-path", + type=str, + default="", + help="Override MetaQA KB triples file path. Defaults to /kb.txt.", + ) + parser.add_argument( + "--metaqa-variant", + type=str, + default="", + choices=["", "vanilla", "ntm"], + help="Override MetaQA QA variant.", + ) + parser.add_argument( + "--metaqa-hops", + type=str, + default="", + help="Comma-separated hop buckets for MetaQA mode (example: 1-hop,2-hop,3-hop).", + ) + parser.add_argument( + "--metaqa-splits", + type=str, + default="", + help="Comma-separated splits for MetaQA mode (example: train,dev,test).", + ) + + +def build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(prog="osint-env") + sub = parser.add_subparsers(dest="cmd", required=True) + + d = sub.add_parser("demo", help="Run one episode and print debug info.") + _add_common_args(d) + + e = sub.add_parser("eval", help="Run multiple episodes and show aggregate metrics.") + _add_common_args(e) + e.add_argument("--episodes", type=int, default=0) + e.add_argument("--dashboard", type=str, default="") + + b = sub.add_parser("benchmark", help="Run eval, update leaderboard, and export interactive dashboard.") + _add_common_args(b) + b.add_argument("--episodes", type=int, default=0) + b.add_argument("--name", type=str, default="") + b.add_argument("--leaderboard", type=str, default="") + b.add_argument("--dashboard", type=str, default="") + + l = sub.add_parser("leaderboard", help="Print ranked benchmark leaderboard.") + _add_common_args(l) + l.add_argument("--leaderboard", type=str, default="") + l.add_argument("--top", type=int, default=20) + l.add_argument( + "--sort-by", + type=str, + default="leaderboard_score", + choices=[ + "leaderboard_score", + "task_success_rate", + 
"avg_graph_f1", + "tool_efficiency", + "avg_reward", + "retrieval_signal", + "structural_signal", + "deanonymization_accuracy", + "spawn_signal", + ], + ) + + s = sub.add_parser("benchmark-sweep", help="Run benchmark across multiple seeds and append all runs to leaderboard.") + _add_common_args(s) + s.add_argument("--episodes", type=int, default=0) + s.add_argument("--seeds", type=str, default="7,11,17,23,31") + s.add_argument("--name-prefix", type=str, default="sweep") + s.add_argument("--leaderboard", type=str, default="") + s.add_argument("--dashboard-dir", type=str, default="") + + v = sub.add_parser("viz", help="Export an interactive graph/database explorer.") + _add_common_args(v) + v.add_argument("--output", type=str, default="artifacts/osint_explorer.html") + v.add_argument("--with-demo", action="store_true") + v.add_argument("--leaderboard", type=str, default="") + v.add_argument( + "--evaluation", + type=str, + default=DEFAULT_EVALUATION_PATH, + help="Path to a saved evaluation payload with episode details.", + ) + + t = sub.add_parser( + "train-self-play", + help="Run adversarial self-play fine-tuning scaffold with Hugging Face TRL (Kimi-style alternating phases).", + ) + _add_common_args(t) + t.add_argument( + "--train-config", + type=str, + default="config/self_play_training_example.json", + help="Path to self-play training JSON config.", + ) + t.add_argument( + "--train-output-dir", + type=str, + default="", + help="Optional output dir override for self-play artifacts and checkpoints.", + ) + t.add_argument( + "--train-rounds", + type=int, + default=0, + help="Optional override for the number of self-play rounds.", + ) + t.add_argument( + "--dry-run", + action="store_true", + help="Skip actual GRPO updates and only materialize datasets/round artifacts.", + ) + return parser + + +def _resolve_environment_config(args: argparse.Namespace) -> tuple[EnvironmentConfig, dict[str, str | int]]: + shared = load_shared_config(args.config) + env_cfg = 
clone_environment_config(shared.environment) + + if args.seed_file: + env_cfg.seeding = load_seeding_config(args.seed_file) + + if args.llm_provider != "config": + env_cfg.llm.provider = args.llm_provider + if args.llm_model: + env_cfg.llm.model = args.llm_model + if int(args.llm_timeout_seconds) > 0: + env_cfg.llm.timeout_seconds = int(args.llm_timeout_seconds) + if args.ollama_base_url: + env_cfg.llm.ollama_base_url = args.ollama_base_url + if args.openai_base_url: + env_cfg.llm.openai_base_url = args.openai_base_url + if args.openai_api_key: + env_cfg.llm.openai_api_key = args.openai_api_key + if args.openai_api_key_env: + env_cfg.llm.openai_api_key_env = args.openai_api_key_env + + if args.dataset_mode != "config": + env_cfg.dataset_mode = args.dataset_mode + if args.metaqa_root: + env_cfg.metaqa_root = args.metaqa_root + if args.metaqa_kb_path: + env_cfg.metaqa_kb_path = args.metaqa_kb_path + if args.metaqa_variant: + env_cfg.metaqa_variant = args.metaqa_variant + if args.metaqa_hops: + env_cfg.metaqa_hops = [item.strip() for item in str(args.metaqa_hops).split(",") if item.strip()] + if args.metaqa_splits: + env_cfg.metaqa_splits = [item.strip() for item in str(args.metaqa_splits).split(",") if item.strip()] + + if args.agent_mode == "single": + env_cfg.swarm.enabled = False + elif args.agent_mode == "swarm": + env_cfg.swarm.enabled = True + + runtime = { + "default_episodes": shared.runtime.default_episodes, + "leaderboard_path": shared.runtime.leaderboard_path, + "dashboard_path": shared.runtime.dashboard_path, + "sweep_dashboard_dir": shared.runtime.sweep_dashboard_dir, + } + return env_cfg, runtime + + +def _runner_for(env: OSINTEnvironment) -> SingleAgentRunner | SwarmAgentRunner: + if env.config.swarm.enabled: + return SwarmAgentRunner(env, llm=build_llm_client(env.config.llm)) + return SingleAgentRunner(env, llm=build_llm_client(env.config.llm)) + + +def main() -> None: + args = build_parser().parse_args() + env_cfg, runtime = 
_resolve_environment_config(args) + + episodes = int(args.episodes) if getattr(args, "episodes", 0) else int(runtime["default_episodes"]) + leaderboard_path = str(args.leaderboard) if getattr(args, "leaderboard", "") else str(runtime["leaderboard_path"]) + dashboard_path = str(args.dashboard) if getattr(args, "dashboard", "") else str(runtime["dashboard_path"]) + sweep_dashboard_dir = ( + str(args.dashboard_dir) if getattr(args, "dashboard_dir", "") else str(runtime["sweep_dashboard_dir"]) + ) + evaluation_path = str(getattr(args, "evaluation", "") or DEFAULT_EVALUATION_PATH) + + if args.cmd == "leaderboard": + records = load_leaderboard(leaderboard_path) + print(render_leaderboard_table(records, top_k=args.top, sort_by=args.sort_by)) + return + + if args.cmd == "benchmark-sweep": + seed_values = [int(x.strip()) for x in args.seeds.split(",") if x.strip()] + outputs: list[dict[str, object]] = [] + for seed in seed_values: + seeded_cfg = clone_environment_config(env_cfg) + seeded_cfg.seed = seed + env = OSINTEnvironment(seeded_cfg, llm=build_llm_client(seeded_cfg.llm)) + evaluation = run_evaluation(env, episodes=episodes, return_details=True, llm=build_llm_client(seeded_cfg.llm)) + summary = evaluation["summary"] + run_name = f"{args.name_prefix}_seed{seed}" + record = append_leaderboard_record( + path=leaderboard_path, + summary=summary, + episodes=episodes, + run_name=run_name, + config={ + "seed": seed, + "max_steps": env.config.max_steps, + "swarm_enabled": env.config.swarm.enabled, + "max_agents": env.config.swarm.max_agents, + "max_breadth": env.config.swarm.max_breadth, + "max_width": env.config.swarm.max_width, + "max_depth": env.config.swarm.max_depth, + "seeded_questions": len(env.config.seeding.seeded_questions), + }, + ) + dashboard_path = export_dashboard( + env=env, + evaluation=evaluation, + leaderboard_records=load_leaderboard(leaderboard_path), + output_path=f"{sweep_dashboard_dir}/{run_name}.html", + ) + _save_evaluation(DEFAULT_EVALUATION_PATH, 
evaluation) + outputs.append({"seed": seed, "record": record, "dashboard": dashboard_path, "summary": summary}) + + records = load_leaderboard(leaderboard_path) + print( + json.dumps( + { + "runs": outputs, + "leaderboard_preview": render_leaderboard_table(records, top_k=min(10, len(records))), + }, + indent=2, + sort_keys=True, + ) + ) + return + + if args.cmd == "train-self-play": + from osint_env.training import load_self_play_config, run_adversarial_self_play + + train_cfg = load_self_play_config(args.train_config) + if str(args.train_output_dir).strip(): + train_cfg.output_dir = str(args.train_output_dir).strip() + if int(args.train_rounds) > 0: + train_cfg.rounds = int(args.train_rounds) + + payload = run_adversarial_self_play( + env_config=env_cfg, + training_config=train_cfg, + dry_run=bool(args.dry_run), + ) + print(json.dumps(payload, indent=2, sort_keys=True)) + return + + llm_client = build_llm_client(env_cfg.llm) + env = OSINTEnvironment(env_cfg, llm=llm_client) + if args.cmd == "demo": + info = _runner_for(env).run_episode() + print(json.dumps(info, indent=2, sort_keys=True)) + elif args.cmd == "eval": + evaluation = run_evaluation(env, episodes=episodes, return_details=True, llm=llm_client) + _save_evaluation(DEFAULT_EVALUATION_PATH, evaluation) + leaderboard = load_leaderboard(leaderboard_path) + export_dashboard( + env=env, + evaluation=evaluation, + leaderboard_records=leaderboard, + output_path=dashboard_path, + ) + print(json.dumps(evaluation["summary"], indent=2, sort_keys=True)) + elif args.cmd == "benchmark": + evaluation = run_evaluation(env, episodes=episodes, return_details=True, llm=llm_client) + summary = evaluation["summary"] + record = append_leaderboard_record( + path=leaderboard_path, + summary=summary, + episodes=episodes, + run_name=args.name or None, + config={ + "seed": env.config.seed, + "max_steps": env.config.max_steps, + "swarm_enabled": env.config.swarm.enabled, + "max_agents": env.config.swarm.max_agents, + "max_breadth": 
env.config.swarm.max_breadth, + "max_width": env.config.swarm.max_width, + "max_depth": env.config.swarm.max_depth, + "seeded_questions": len(env.config.seeding.seeded_questions), + }, + ) + leaderboard = load_leaderboard(leaderboard_path) + dashboard_path = export_dashboard( + env=env, + evaluation=evaluation, + leaderboard_records=leaderboard, + output_path=dashboard_path, + ) + _save_evaluation(DEFAULT_EVALUATION_PATH, evaluation) + payload = { + "record": record, + "summary": summary, + "dashboard": dashboard_path, + } + print(json.dumps(payload, indent=2, sort_keys=True)) + elif args.cmd == "viz": + evaluation: dict | None = _load_evaluation(evaluation_path) + if args.with_demo: + _runner_for(env).run_episode() + info = { + "agent_answer": env.state.answer if env.state else "", + "task_answer": env.state.task.answer if env.state else "", + "total_reward": env.state.total_reward if env.state else 0.0, + "step_count": env.state.step_count if env.state else 0, + "tool_calls": env.state.tool_calls if env.state else 0, + } + evaluation = { + "summary": { + "task_success_rate": float(info["agent_answer"] == info["task_answer"]), + "tool_efficiency": 0.0, + "avg_graph_f1": 0.0, + "avg_steps_to_solution": float(info["step_count"]), + "deanonymization_accuracy": 0.0, + "avg_reward": float(info["total_reward"]), + "leaderboard_score": 0.0, + }, + "episodes": [ + { + "task_id": env.state.task.task_id if env.state else "n/a", + "task_type": env.state.task.task_type if env.state else "n/a", + "question": env.state.task.question if env.state else "n/a", + "task_answer": str(info["task_answer"]), + "agent_answer": str(info["agent_answer"]), + "graph_f1": 0.0, + "reward": float(info["total_reward"]), + "steps": int(info["step_count"]), + "tool_calls": int(info["tool_calls"]), + "success": int(info["agent_answer"] == info["task_answer"]), + } + ], + } + + graph_f1 = 0.0 + if env.state is not None: + graph_f1 = compute_graph_f1(env.memory_graph.edges, 
env.state.task.supporting_edges) + + if evaluation is None: + summary = { + "task_success_rate": 0.0, + "tool_efficiency": 0.0, + "avg_graph_f1": graph_f1, + "avg_steps_to_solution": float(env.state.step_count) if env.state else 0.0, + "deanonymization_accuracy": 0.0, + "avg_reward": float(env.state.total_reward) if env.state else 0.0, + "leaderboard_score": 0.0, + } + evaluation = {"summary": summary, "episodes": []} + + leaderboard = load_leaderboard(leaderboard_path) + out = export_dashboard(env=env, evaluation=evaluation, leaderboard_records=leaderboard, output_path=args.output) + print(json.dumps({"dashboard": out, "evaluation": evaluation_path}, indent=2, sort_keys=True)) + + +if __name__ == "__main__": + main() diff --git a/src/osint_env/config/__init__.py b/src/osint_env/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f33c5b3ecce550f035a413d9447b75844bc35dc --- /dev/null +++ b/src/osint_env/config/__init__.py @@ -0,0 +1,9 @@ +from osint_env.config.shared import RuntimeDefaults, SharedConfig, clone_environment_config, load_seeding_config, load_shared_config + +__all__ = [ + "RuntimeDefaults", + "SharedConfig", + "clone_environment_config", + "load_seeding_config", + "load_shared_config", +] diff --git a/src/osint_env/config/shared.py b/src/osint_env/config/shared.py new file mode 100644 index 0000000000000000000000000000000000000000..1f57e33f675957e15416e8ccb7478e486ab2f38a --- /dev/null +++ b/src/osint_env/config/shared.py @@ -0,0 +1,279 @@ +from __future__ import annotations + +import copy +import json +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + +from osint_env.domain.models import ( + EnvironmentConfig, + LLMConfig, + NodeType, + SeedingConfig, + SeedEdgeSpec, + SeedNodeSpec, + SeedQuestionSpec, + SpawnRewardConfig, + SwarmConfig, +) + + +@dataclass(slots=True) +class RuntimeDefaults: + default_episodes: int = 20 + leaderboard_path: str = 
"artifacts/leaderboard.json" + dashboard_path: str = "artifacts/osint_dashboard.html" + sweep_dashboard_dir: str = "artifacts/sweep_dashboards" + + +@dataclass(slots=True) +class SharedConfig: + environment: EnvironmentConfig = field(default_factory=EnvironmentConfig) + runtime: RuntimeDefaults = field(default_factory=RuntimeDefaults) + + +def clone_environment_config(config: EnvironmentConfig) -> EnvironmentConfig: + return copy.deepcopy(config) + + +def _as_dict(value: Any) -> dict[str, Any]: + return value if isinstance(value, dict) else {} + + +def _parse_int(value: Any, default: int) -> int: + try: + return int(value) + except (TypeError, ValueError): + return default + + +def _parse_float(value: Any, default: float) -> float: + try: + return float(value) + except (TypeError, ValueError): + return default + + +def _parse_bool(value: Any, default: bool) -> bool: + if isinstance(value, bool): + return value + if isinstance(value, str): + lowered = value.strip().lower() + if lowered in {"1", "true", "yes", "y", "on"}: + return True + if lowered in {"0", "false", "no", "n", "off"}: + return False + return default + + +def _parse_str_list(value: Any, default: list[str]) -> list[str]: + if isinstance(value, list): + items = [str(item).strip() for item in value if str(item).strip()] + return items or list(default) + if isinstance(value, str): + items = [part.strip() for part in value.split(",") if part.strip()] + return items or list(default) + return list(default) + + +def _infer_node_type(node_id: str) -> NodeType: + prefix = str(node_id).split("_", 1)[0].lower() + mapping = { + "user": NodeType.USER, + "alias": NodeType.ALIAS, + "org": NodeType.ORG, + "loc": NodeType.LOCATION, + "location": NodeType.LOCATION, + "post": NodeType.POST, + "thr": NodeType.THREAD, + "thread": NodeType.THREAD, + "event": NodeType.EVENT, + } + return mapping.get(prefix, NodeType.USER) + + +def _parse_node_type(value: Any, node_id: str) -> NodeType: + if isinstance(value, NodeType): + 
return value + if isinstance(value, str): + raw = value.strip().lower() + try: + return NodeType(raw) + except ValueError: + return _infer_node_type(node_id) + return _infer_node_type(node_id) + + +def _parse_seed_edge(item: dict[str, Any]) -> SeedEdgeSpec | None: + src = str(item.get("src", "")).strip() + rel = str(item.get("rel", "")).strip() + dst = str(item.get("dst", "")).strip() + if not src or not rel or not dst: + return None + confidence = _parse_float(item.get("confidence", 1.0), 1.0) + return SeedEdgeSpec(src=src, rel=rel, dst=dst, confidence=confidence) + + +def _parse_seeding(data: dict[str, Any]) -> SeedingConfig: + seeded_nodes: list[SeedNodeSpec] = [] + for item in data.get("seeded_nodes", []): + row = _as_dict(item) + node_id = str(row.get("node_id", "")).strip() + if not node_id: + continue + node_type = _parse_node_type(row.get("node_type"), node_id) + attrs = _as_dict(row.get("attrs")) + seeded_nodes.append(SeedNodeSpec(node_id=node_id, node_type=node_type, attrs=attrs)) + + seeded_edges: list[SeedEdgeSpec] = [] + for item in data.get("seeded_edges", []): + edge = _parse_seed_edge(_as_dict(item)) + if edge is not None: + seeded_edges.append(edge) + + seeded_questions: list[SeedQuestionSpec] = [] + for item in data.get("seeded_questions", []): + row = _as_dict(item) + question = str(row.get("question", "")).strip() + if not question: + continue + answer_val = row.get("answer") + answer = str(answer_val).strip() if answer_val is not None and str(answer_val).strip() else None + task_type = str(row.get("task_type", "seeded")).strip() or "seeded" + support_edges: list[SeedEdgeSpec] = [] + for edge_item in row.get("supporting_edges", []): + edge = _parse_seed_edge(_as_dict(edge_item)) + if edge is not None: + support_edges.append(edge) + metadata = _as_dict(row.get("metadata")) + seeded_questions.append( + SeedQuestionSpec( + question=question, + answer=answer, + task_type=task_type, + supporting_edges=support_edges, + metadata=metadata, + ) + ) + + 
return SeedingConfig( + seeded_nodes=seeded_nodes, + seeded_edges=seeded_edges, + seeded_questions=seeded_questions, + llm_generate_remaining_graph=_parse_bool(data.get("llm_generate_remaining_graph"), True), + llm_generate_remaining_tasks=_parse_bool(data.get("llm_generate_remaining_tasks"), True), + llm_generated_edge_budget=max(0, _parse_int(data.get("llm_generated_edge_budget"), 6)), + llm_generated_task_budget=max(0, _parse_int(data.get("llm_generated_task_budget"), 8)), + llm_generation_parallel=_parse_bool(data.get("llm_generation_parallel"), True), + llm_generation_workers=max(1, _parse_int(data.get("llm_generation_workers"), 3)), + llm_generation_retries=max(1, _parse_int(data.get("llm_generation_retries"), 2)), + allow_template_fallback_on_llm_failure=_parse_bool( + data.get("allow_template_fallback_on_llm_failure"), + False, + ), + ) + + +def load_seeding_config(path: str | Path) -> SeedingConfig: + payload = json.loads(Path(path).read_text(encoding="utf-8")) + if not isinstance(payload, dict): + raise ValueError("Seed file must contain a JSON object.") + source = _as_dict(payload.get("seeding", payload)) + return _parse_seeding(source) + + +def _parse_environment(payload: dict[str, Any]) -> EnvironmentConfig: + env_data = _as_dict(payload.get("environment", payload)) + dataset_data = _as_dict(payload.get("dataset", env_data.get("dataset", {}))) + swarm_data = _as_dict(payload.get("swarm", env_data.get("swarm", {}))) + spawn_data = _as_dict(payload.get("spawn_reward", env_data.get("spawn_reward", {}))) + seeding_data = _as_dict(payload.get("seeding", env_data.get("seeding", {}))) + llm_data = _as_dict(payload.get("llm", env_data.get("llm", {}))) + + dataset_mode = str(dataset_data.get("mode", env_data.get("dataset_mode", "canonical"))).strip().lower() + if dataset_mode not in {"canonical", "metaqa"}: + dataset_mode = "canonical" + + metaqa_variant = str(dataset_data.get("metaqa_variant", env_data.get("metaqa_variant", "vanilla"))).strip().lower() + if 
metaqa_variant not in {"vanilla", "ntm"}: + metaqa_variant = "vanilla" + + env = EnvironmentConfig( + n_users=max(4, _parse_int(env_data.get("n_users"), 40)), + alias_density=max(0.0, min(1.0, _parse_float(env_data.get("alias_density"), 0.35))), + noise_level=max(0.0, min(1.0, _parse_float(env_data.get("noise_level"), 0.15))), + red_herring_rate=max(0.0, min(1.0, _parse_float(env_data.get("red_herring_rate"), 0.1))), + max_steps=max(2, _parse_int(env_data.get("max_steps"), 18)), + seed=_parse_int(env_data.get("seed"), 7), + dataset_mode=dataset_mode, + metaqa_root=str(dataset_data.get("metaqa_root", env_data.get("metaqa_root", "metaQA"))).strip() or "metaQA", + metaqa_kb_path=str(dataset_data.get("metaqa_kb_path", env_data.get("metaqa_kb_path", ""))).strip(), + metaqa_variant=metaqa_variant, + metaqa_hops=_parse_str_list( + dataset_data.get("metaqa_hops", env_data.get("metaqa_hops", ["1-hop", "2-hop", "3-hop"])), + ["1-hop", "2-hop", "3-hop"], + ), + metaqa_splits=_parse_str_list( + dataset_data.get("metaqa_splits", env_data.get("metaqa_splits", ["train", "dev", "test"])), + ["train", "dev", "test"], + ), + ) + + env.swarm = SwarmConfig( + enabled=_parse_bool(swarm_data.get("enabled"), False), + max_agents=max(1, _parse_int(swarm_data.get("max_agents"), 3)), + max_breadth=max(1, _parse_int(swarm_data.get("max_breadth"), 2)), + max_width=max(1, _parse_int(swarm_data.get("max_width"), 2)), + max_depth=max(1, _parse_int(swarm_data.get("max_depth"), 2)), + planner_rounds=max(1, _parse_int(swarm_data.get("planner_rounds"), 2)), + tools_per_agent=max(1, _parse_int(swarm_data.get("tools_per_agent"), 1)), + ) + + env.spawn_reward = SpawnRewardConfig( + lambda_parallel=max(0.0, _parse_float(spawn_data.get("lambda_parallel"), 0.15)), + lambda_finish=max(0.0, _parse_float(spawn_data.get("lambda_finish"), 0.2)), + anneal=max(0.0, min(1.0, _parse_float(spawn_data.get("anneal"), 1.0))), + max_parallel_hint=max(1, _parse_int(spawn_data.get("max_parallel_hint"), 3)), + ) + + 
env.seeding = _parse_seeding(seeding_data) + env.llm = LLMConfig( + provider=str(llm_data.get("provider", "mock")).strip() or "mock", + model=str(llm_data.get("model", "qwen3:2b")).strip() or "qwen3:2b", + temperature=_parse_float(llm_data.get("temperature"), 0.1), + max_tokens=max(1, _parse_int(llm_data.get("max_tokens"), 256)), + timeout_seconds=max(1, _parse_int(llm_data.get("timeout_seconds"), 240)), + ollama_base_url=str(llm_data.get("ollama_base_url", "http://127.0.0.1:11434")).strip() + or "http://127.0.0.1:11434", + openai_base_url=str(llm_data.get("openai_base_url", "https://api.openai.com/v1")).strip() + or "https://api.openai.com/v1", + openai_api_key_env=str(llm_data.get("openai_api_key_env", "OPENAI_API_KEY")).strip() or "OPENAI_API_KEY", + openai_api_key=str(llm_data.get("openai_api_key", "")).strip(), + ) + return env + + +def _parse_runtime(payload: dict[str, Any]) -> RuntimeDefaults: + runtime = _as_dict(payload.get("runtime", {})) + return RuntimeDefaults( + default_episodes=max(1, _parse_int(runtime.get("default_episodes"), 20)), + leaderboard_path=str(runtime.get("leaderboard_path", "artifacts/leaderboard.json")), + dashboard_path=str(runtime.get("dashboard_path", "artifacts/osint_dashboard.html")), + sweep_dashboard_dir=str(runtime.get("sweep_dashboard_dir", "artifacts/sweep_dashboards")), + ) + + +def load_shared_config(path: str | Path | None) -> SharedConfig: + if not path: + return SharedConfig() + + file_path = Path(path) + if not file_path.exists(): + return SharedConfig() + + payload = json.loads(file_path.read_text(encoding="utf-8")) + if not isinstance(payload, dict): + raise ValueError("Shared config file must contain a JSON object.") + + return SharedConfig(environment=_parse_environment(payload), runtime=_parse_runtime(payload)) diff --git a/src/osint_env/data/__init__.py b/src/osint_env/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6d328d532fb74285ba87590893c78c9f07d03bc8 --- /dev/null +++ 
b/src/osint_env/data/__init__.py @@ -0,0 +1,2 @@ +"""Dataset generation package.""" + diff --git a/src/osint_env/data/generator.py b/src/osint_env/data/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..1b4a6caf07634d0564dce68c8d83c270f8c5e3fa --- /dev/null +++ b/src/osint_env/data/generator.py @@ -0,0 +1,1269 @@ +from __future__ import annotations + +from concurrent.futures import ThreadPoolExecutor, as_completed +import json +from pathlib import Path +import random +import re +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +from osint_env.data.metaqa import MetaQATaskRecord, infer_metaqa_support_edges, load_metaqa_dataset + +from osint_env.domain.models import ( + CanonicalGraph, + Edge, + EnvironmentConfig, + Node, + NodeType, + SeedEdgeSpec, + SeedQuestionSpec, + TaskInstance, +) + +if TYPE_CHECKING: + from osint_env.llm.interface import LLMClient + + +@dataclass(slots=True) +class PlatformViews: + microblog_posts: list[dict] + forum_threads: list[dict] + profiles: list[dict] + alias_lookup: dict[str, str] + + +def _edge_payload(edge: Edge) -> dict[str, Any]: + return { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + + +def _normalize_swarm_v2_path_edges(path: list[Edge | dict[str, Any]]) -> list[Edge]: + out: list[Edge] = [] + for row in path: + if isinstance(row, Edge): + out.append(Edge(row.src, row.rel, row.dst, float(row.confidence))) + continue + if not isinstance(row, dict): + return [] + src = str(row.get("src", "")).strip() + rel = str(row.get("rel", "")).strip() + dst = str(row.get("dst", "")).strip() + if not src or not rel or not dst: + return [] + try: + confidence = float(row.get("confidence", 1.0)) + except (TypeError, ValueError): + confidence = 1.0 + out.append(Edge(src=src, rel=rel, dst=dst, confidence=confidence)) + return out + + +def enumerate_swarm_v2_neighbors(graph: CanonicalGraph, node_id: str) -> list[Edge]: + edges = [edge 
for edge in graph.edges if edge.src == node_id] + edges.sort(key=lambda edge: (edge.src, edge.rel, edge.dst)) + return [Edge(edge.src, edge.rel, edge.dst, float(edge.confidence)) for edge in edges] + + +def trace_swarm_v2_path(graph: CanonicalGraph, path: list[Edge | dict[str, Any]]) -> list[Edge]: + edges = _normalize_swarm_v2_path_edges(path) + if not edges: + return [] + + graph_edges = {(edge.src, edge.rel, edge.dst) for edge in graph.edges} + for idx, edge in enumerate(edges): + if (edge.src, edge.rel, edge.dst) not in graph_edges: + return [] + if idx > 0 and edges[idx - 1].dst != edge.src: + return [] + return edges + + +def select_swarm_v2_answer(path_edges: list[Edge]) -> str: + if not path_edges: + return "" + return path_edges[-1].dst + + +def emit_swarm_v2_question(path_edges: list[Edge]) -> str: + if not path_edges: + return "" + start = path_edges[0].src + relation_path = " -> ".join(edge.rel for edge in path_edges) + hops = len(path_edges) + return ( + f"If you start at {start} and follow the relation path {relation_path}, " + f"which entity do you reach after {hops} hops?" 
+ ) + + +def build_swarm_v2_tool_trace(graph: CanonicalGraph, path_edges: list[Edge]) -> list[dict[str, Any]]: + traced = trace_swarm_v2_path(graph, path_edges) + if not traced: + return [] + + tool_trace: list[dict[str, Any]] = [] + for idx, edge in enumerate(traced): + neighbors = enumerate_swarm_v2_neighbors(graph, edge.src) + tool_trace.append( + { + "tool_name": "enumerate_neighbors", + "args": { + "node_id": edge.src, + "hop_index": idx, + "expected_edge": _edge_payload(edge), + }, + "output": { + "neighbors": [_edge_payload(candidate) for candidate in neighbors], + }, + } + ) + + tool_trace.append( + { + "tool_name": "trace_path", + "args": { + "path": [_edge_payload(edge) for edge in traced], + }, + "output": { + "path": [_edge_payload(edge) for edge in traced], + }, + } + ) + + answer = select_swarm_v2_answer(traced) + tool_trace.append( + { + "tool_name": "select_answer", + "args": { + "strategy": "path_dst", + }, + "output": { + "answer": answer, + }, + } + ) + + question = emit_swarm_v2_question(traced) + tool_trace.append( + { + "tool_name": "emit_question", + "args": { + "style": "relation_path_v1", + }, + "output": { + "question": question, + }, + } + ) + return tool_trace + + +def build_swarm_v2_canonical_subgraph( + graph: CanonicalGraph, + path_edges: list[Edge], + max_extra_edges: int = 4, +) -> dict[str, Any]: + traced = trace_swarm_v2_path(graph, path_edges) + if not traced: + return {"nodes": [], "edges": [], "path": []} + + path_nodes = {traced[0].src} + for edge in traced: + path_nodes.add(edge.src) + path_nodes.add(edge.dst) + + path_keys = {(edge.src, edge.rel, edge.dst) for edge in traced} + extra_edges: list[Edge] = [] + for edge in graph.edges: + key = (edge.src, edge.rel, edge.dst) + if key in path_keys: + continue + if edge.src in path_nodes or edge.dst in path_nodes: + extra_edges.append(Edge(edge.src, edge.rel, edge.dst, float(edge.confidence))) + if len(extra_edges) >= max(0, int(max_extra_edges)): + break + + subgraph_edges = 
list(traced) + extra_edges + subgraph_nodes = sorted({edge.src for edge in subgraph_edges} | {edge.dst for edge in subgraph_edges}) + return { + "nodes": subgraph_nodes, + "edges": [_edge_payload(edge) for edge in subgraph_edges], + "path": [_edge_payload(edge) for edge in traced], + "answer": select_swarm_v2_answer(traced), + } + + +def build_swarm_v2_path_candidates( + graph: CanonicalGraph, + rng: random.Random, + count: int, + min_hops: int = 2, + max_hops: int = 4, +) -> list[list[Edge]]: + if count <= 0: + return [] + + outgoing: dict[str, list[Edge]] = {} + for edge in graph.edges: + outgoing.setdefault(edge.src, []).append(edge) + + def path_match_count(path: list[Edge], limit: int = 4) -> int: + if not path: + return 0 + relations = [edge.rel for edge in path] + answer = path[-1].dst + start = path[0].src + match_count = 0 + stack: list[tuple[str, int, tuple[str, ...]]] = [(start, 0, (start,))] + while stack: + node_id, rel_idx, seen_nodes = stack.pop() + if rel_idx >= len(relations): + if node_id == answer: + match_count += 1 + if match_count >= limit: + return match_count + continue + relation = relations[rel_idx] + for edge in outgoing.get(node_id, []): + if edge.rel != relation: + continue + if edge.dst in seen_nodes: + continue + stack.append((edge.dst, rel_idx + 1, seen_nodes + (edge.dst,))) + return match_count + + starts = [node_id for node_id, edges in outgoing.items() if edges] + if not starts: + return [] + + seen: set[tuple[tuple[str, str, str], ...]] = set() + candidates: list[list[Edge]] = [] + attempt_budget = max(16, count * 20) + lower_hops = max(1, int(min_hops)) + upper_hops = max(lower_hops, int(max_hops)) + + for _ in range(attempt_budget): + if len(candidates) >= count: + break + + current = rng.choice(starts) + target_hops = rng.randint(lower_hops, upper_hops) + path: list[Edge] = [] + visited_nodes = {current} + + for _hop in range(target_hops): + options = [edge for edge in outgoing.get(current, []) if edge.dst not in 
visited_nodes] + if not options: + break + edge = rng.choice(options) + path.append(Edge(edge.src, edge.rel, edge.dst, float(edge.confidence))) + current = edge.dst + visited_nodes.add(current) + + if len(path) < lower_hops: + continue + if path_match_count(path) != 1: + continue + + key = tuple((edge.src, edge.rel, edge.dst) for edge in path) + if key in seen: + continue + seen.add(key) + candidates.append(path) + + if candidates: + return candidates[:count] + + # Fall back to unique 1-hop paths only when the graph is too shallow for multi-hop traces. + for edge in graph.edges: + key = ((edge.src, edge.rel, edge.dst),) + if key in seen: + continue + if path_match_count([edge]) != 1: + continue + seen.add(key) + candidates.append([Edge(edge.src, edge.rel, edge.dst, float(edge.confidence))]) + if len(candidates) >= count: + break + return candidates[:count] + + +class DatasetGenerator: + def __init__(self, config: EnvironmentConfig, llm: LLMClient | None = None): + self.config = config + self.rng = random.Random(config.seed) + self.llm = llm + self._metaqa_records: list[MetaQATaskRecord] = [] + + @staticmethod + def _edge_key(edge: Edge) -> tuple[str, str, str]: + return (edge.src, edge.rel, edge.dst) + + def _dataset_mode(self) -> str: + token = str(getattr(self.config, "dataset_mode", "canonical") or "canonical").strip().lower() + return "metaqa" if token == "metaqa" else "canonical" + + @staticmethod + def _metaqa_difficulty(hop_label: str) -> str: + hop = str(hop_label).strip().lower() + if hop == "1-hop": + return "easy" + if hop == "2-hop": + return "medium" + return "hard" + + @staticmethod + def _infer_node_type(node_id: str) -> NodeType: + prefix = str(node_id).split("_", 1)[0].lower() + mapping = { + "user": NodeType.USER, + "alias": NodeType.ALIAS, + "org": NodeType.ORG, + "loc": NodeType.LOCATION, + "location": NodeType.LOCATION, + "post": NodeType.POST, + "thr": NodeType.THREAD, + "thread": NodeType.THREAD, + "event": NodeType.EVENT, + } + return 
mapping.get(prefix, NodeType.USER) + + def _ensure_node(self, graph: CanonicalGraph, node_id: str) -> None: + if node_id in graph.nodes: + return + node_type = self._infer_node_type(node_id) + attrs: dict[str, Any] = {} + if node_type == NodeType.USER: + attrs = {"name": node_id, "org": "Unknown", "location": "Unknown"} + if node_type == NodeType.ALIAS: + attrs = {"handle": f"@{node_id}"} + graph.nodes[node_id] = Node(node_id=node_id, node_type=node_type, attrs=attrs) + + def _add_edge_if_missing(self, graph: CanonicalGraph, edge: Edge) -> None: + key = self._edge_key(edge) + if any(self._edge_key(existing) == key for existing in graph.edges): + return + self._ensure_node(graph, edge.src) + self._ensure_node(graph, edge.dst) + graph.edges.append(edge) + + @staticmethod + def _extract_json_blob(text: str) -> Any: + text = str(text).strip() + if not text: + return None + for start, end in (("{", "}"), ("[", "]")): + left = text.find(start) + right = text.rfind(end) + if left >= 0 and right > left: + snippet = text[left : right + 1] + try: + return json.loads(snippet) + except json.JSONDecodeError: + continue + return None + + def _apply_seed_nodes(self, graph: CanonicalGraph) -> None: + for node_spec in self.config.seeding.seeded_nodes: + node_type = ( + node_spec.node_type + if isinstance(node_spec.node_type, NodeType) + else self._infer_node_type(node_spec.node_id) + ) + existing = graph.nodes.get(node_spec.node_id) + attrs = dict(existing.attrs) if existing else {} + attrs.update(node_spec.attrs) + graph.nodes[node_spec.node_id] = Node(node_spec.node_id, node_type, attrs) + + def _apply_seed_edges(self, graph: CanonicalGraph) -> None: + for edge_spec in self.config.seeding.seeded_edges: + self._add_edge_if_missing( + graph, + Edge( + src=edge_spec.src, + rel=edge_spec.rel, + dst=edge_spec.dst, + confidence=float(edge_spec.confidence), + ), + ) + + @staticmethod + def _normalize_edge_candidates(value: Any) -> list[SeedEdgeSpec]: + items: list[SeedEdgeSpec] = [] + 
if not isinstance(value, list): + return items + for row in value: + if not isinstance(row, dict): + continue + src = str(row.get("src", "")).strip() + rel = str(row.get("rel", "")).strip() + dst = str(row.get("dst", "")).strip() + if not src or not rel or not dst: + continue + try: + confidence = float(row.get("confidence", 1.0)) + except (TypeError, ValueError): + confidence = 1.0 + items.append(SeedEdgeSpec(src=src, rel=rel, dst=dst, confidence=confidence)) + return items + + @staticmethod + def _split_budget(total: int, parts: int) -> list[int]: + if total <= 0: + return [] + slots = max(1, parts) + base = total // slots + remainder = total % slots + chunks = [base + (1 if i < remainder else 0) for i in range(slots)] + return [chunk for chunk in chunks if chunk > 0] + + @staticmethod + def _shared_context_blob(graph: CanonicalGraph, node_limit: int = 100, edge_limit: int = 80) -> str: + payload = { + "known_nodes": sorted(graph.nodes.keys())[:node_limit], + "known_edges": [ + {"src": edge.src, "rel": edge.rel, "dst": edge.dst} + for edge in graph.edges[: min(edge_limit, len(graph.edges))] + ], + } + return json.dumps(payload) + + def _llm_generate_json_with_retry(self, prompt: str) -> Any: + if self.llm is None: + return None + + attempts = max(1, int(self.config.seeding.llm_generation_retries)) + for _ in range(attempts): + try: + response = self.llm.generate([{"role": "system", "content": prompt}], tools=[]) + except Exception: + continue + parsed = self._extract_json_blob(response.content) + if parsed is not None: + return parsed + return None + + def _run_generation_workers(self, prompts: list[str]) -> list[Any]: + if not prompts: + return [] + + max_workers = max(1, min(self.config.seeding.llm_generation_workers, len(prompts))) + if not self.config.seeding.llm_generation_parallel or max_workers == 1: + output: list[Any] = [] + for prompt in prompts: + parsed = self._llm_generate_json_with_retry(prompt) + if parsed is not None: + output.append(parsed) + 
return output + + output = [] + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [executor.submit(self._llm_generate_json_with_retry, prompt) for prompt in prompts] + for future in as_completed(futures): + try: + parsed = future.result() + except Exception: + parsed = None + if parsed is not None: + output.append(parsed) + return output + + def _template_fallback_allowed(self) -> bool: + if self.llm is None: + return True + return bool(self.config.seeding.allow_template_fallback_on_llm_failure) + + def _template_generated_edges(self, graph: CanonicalGraph, budget: int) -> list[Edge]: + if budget <= 0: + return [] + users = [n.node_id for n in graph.nodes.values() if n.node_type == NodeType.USER] + aliases = [n.node_id for n in graph.nodes.values() if n.node_type == NodeType.ALIAS] + if len(users) < 2: + return [] + + generated: list[Edge] = [] + rels = ["connected_to", "mentions", "co_occurs_with"] + for _ in range(budget * 3): + if len(generated) >= budget: + break + roll = self.rng.random() + if aliases and roll < 0.2: + src = self.rng.choice(aliases) + dst = self.rng.choice(users) + rel = "alias_of" + elif roll < 0.75: + src, dst = self.rng.sample(users, 2) + rel = self.rng.choice(rels) + else: + src = self.rng.choice(users) + dst = self.rng.choice([u for u in users if u != src]) + rel = "connected_to" + generated.append(Edge(src=src, rel=rel, dst=dst, confidence=0.7)) + return generated[:budget] + + def _llm_expand_graph(self, graph: CanonicalGraph, budget: int) -> list[Edge]: + if budget <= 0: + return [] + + if self.llm is None: + return self._template_generated_edges(graph, budget) + + shared_context = self._shared_context_blob(graph) + workers = max(1, min(self.config.seeding.llm_generation_workers, budget)) + chunks = self._split_budget(budget, workers) + focus_tracks = ["entity_linking", "network_expansion", "org_location", "event_trace"] + + prompts: list[str] = [] + for idx, chunk_budget in enumerate(chunks): + focus = 
focus_tracks[idx % len(focus_tracks)] + prompts.append( + ( + "SEED_GRAPH_EXPANSION_AGENT\n" + "SHARED_CONTEXT\n" + f"{shared_context}\n" + f"worker_id: {idx}\n" + f"focus: {focus}\n" + f"budget: {chunk_budget}\n" + "Generate plausible graph edges for OSINT retrieval.\n" + "Return STRICT JSON object: {\"edges\": [{\"src\": str, \"rel\": str, \"dst\": str, \"confidence\": float}]}.\n" + "Prefer known nodes from SHARED_CONTEXT and avoid duplicates." + ) + ) + + generated: list[Edge] = [] + seen: set[tuple[str, str, str]] = set() + for payload in self._run_generation_workers(prompts): + raw_edges: Any = None + if isinstance(payload, dict): + raw_edges = payload.get("edges") + elif isinstance(payload, list): + raw_edges = payload + for edge_spec in self._normalize_edge_candidates(raw_edges): + key = (edge_spec.src, edge_spec.rel, edge_spec.dst) + if key in seen: + continue + seen.add(key) + generated.append(Edge(edge_spec.src, edge_spec.rel, edge_spec.dst, float(edge_spec.confidence))) + if len(generated) >= budget: + break + if len(generated) >= budget: + break + + if len(generated) < budget: + residual = budget - len(generated) + residual_prompt = ( + "SEED_GRAPH_EXPANSION_AGENT\n" + "SHARED_CONTEXT\n" + f"{shared_context}\n" + f"budget: {residual}\n" + "Generate any remaining high-utility edges.\n" + "Return STRICT JSON object: {\"edges\": [{\"src\": str, \"rel\": str, \"dst\": str, \"confidence\": float}]}." 
+ ) + payload = self._llm_generate_json_with_retry(residual_prompt) + raw_edges: Any = payload.get("edges") if isinstance(payload, dict) else payload + for edge_spec in self._normalize_edge_candidates(raw_edges): + key = (edge_spec.src, edge_spec.rel, edge_spec.dst) + if key in seen: + continue + seen.add(key) + generated.append(Edge(edge_spec.src, edge_spec.rel, edge_spec.dst, float(edge_spec.confidence))) + if len(generated) >= budget: + break + + if len(generated) < budget and self._template_fallback_allowed(): + for edge in self._template_generated_edges(graph, budget - len(generated)): + key = (edge.src, edge.rel, edge.dst) + if key in seen: + continue + seen.add(key) + generated.append(edge) + if len(generated) >= budget: + break + + return generated[:budget] + + @staticmethod + def _extract_entity_tokens(question: str) -> list[str]: + return re.findall(r"\b(?:alias|user|org|loc|post|thr|thread|event)_[a-zA-Z0-9_]+\b", question) + + @staticmethod + def _normalize_difficulty(value: str, index: int) -> str: + token = str(value or "").strip().lower() + if token in {"easy", "e"}: + return "easy" + if token in {"mid", "medium", "m"}: + return "medium" + if token in {"high", "hard", "h"}: + return "hard" + if index < 10: + return "easy" + if index < 20: + return "medium" + return "hard" + + @staticmethod + def _task_type_for_difficulty(base_task_type: str, difficulty: str) -> str: + token = str(base_task_type or "").strip().lower() + if token and token != "fixed_trace": + return token + if difficulty == "easy": + return "easy_trace" + if difficulty == "medium": + return "medium_trace" + return "hard_trace" + + @staticmethod + def _grader_for_difficulty(difficulty: str) -> dict[str, Any]: + return { + "type": "difficulty_exact_match", + "answer_type": "node_id", + "case_sensitive": True, + "reward_profile": difficulty, + "logic": { + "easy": "single_agent_simplified", + "medium": "reduced_components", + "hard": "full_reward", + }.get(difficulty, "full_reward"), + } 
def _task_metadata(self, index: int, base_task_type: str, metadata: dict[str, Any] | None = None) -> dict[str, Any]:
    """Return task metadata with difficulty, grader, and scenario filled in.

    Copies `metadata` (the caller's dict is never mutated), normalizes its
    difficulty token, and only backfills "grader" and "scenario" when the
    keys are absent.
    """
    enriched = dict(metadata) if metadata else {}
    tier = self._normalize_difficulty(enriched.get("difficulty", ""), index)
    enriched["difficulty"] = tier
    if "grader" not in enriched:
        enriched["grader"] = self._grader_for_difficulty(tier)
    if "scenario" not in enriched:
        enriched["scenario"] = self._task_type_for_difficulty(base_task_type, tier)
    return enriched
def _template_tasks(self, graph: CanonicalGraph, count: int, start_idx: int = 0) -> list[TaskInstance]:
    """Generate up to `count` rule-based QA tasks from graph edges.

    Draws from the alias_of / connected_to / works_at edge pools. Empty
    pools are skipped; when no usable edges exist at all, generation stops
    early and fewer (possibly zero) tasks are returned.

    Bug fixed: the original unconditionally did `self.rng.choice(work_edges)`
    in the fallback branch, raising IndexError whenever the graph had no
    works_at edges (this branch is also reached when the alias/connection
    pools are empty).
    """
    alias_edges = [e for e in graph.edges if e.rel == "alias_of"]
    conn_edges = [e for e in graph.edges if e.rel == "connected_to"]
    work_edges = [e for e in graph.edges if e.rel == "works_at"]
    tasks: list[TaskInstance] = []

    for i in range(count):
        mode = self.rng.choice(["identity_resolution", "network_discovery", "event_tracing"])
        if mode == "identity_resolution" and alias_edges:
            edge = self.rng.choice(alias_edges)
            q = f"Which canonical user owns alias {edge.src}?"
            a = edge.dst
            support = [edge]
        elif mode == "network_discovery" and conn_edges:
            edge = self.rng.choice(conn_edges)
            q = f"Who is connected to {edge.src}?"
            a = edge.dst
            support = [edge]
        elif work_edges:
            edge = self.rng.choice(work_edges)
            org_node = graph.nodes.get(edge.dst)
            org_name = (org_node.attrs or {}).get("name", edge.dst) if org_node else edge.dst
            q = f"Which user works at {org_name}?"
            a = edge.src
            support = [edge]
        else:
            # No works_at edges: fall back to whichever pool is populated;
            # with no edges at all there is nothing to template, so stop.
            fallback = alias_edges or conn_edges
            if not fallback:
                break
            edge = self.rng.choice(fallback)
            if edge.rel == "alias_of":
                q = f"Which canonical user owns alias {edge.src}?"
            else:
                q = f"Who is connected to {edge.src}?"
            a = edge.dst
            support = [edge]
        tasks.append(
            TaskInstance(
                task_id=f"task_{start_idx + i}",
                task_type=mode,
                question=q,
                answer=a,
                supporting_edges=support,
                metadata=self._task_metadata(start_idx + i, mode),
            )
        )
    return tasks
+ ) + ) + + llm_tasks: list[TaskInstance] = [] + seen_questions: set[str] = set() + for payload in self._run_generation_workers(prompts): + raw_tasks: Any = None + if isinstance(payload, dict): + raw_tasks = payload.get("tasks") + elif isinstance(payload, list): + raw_tasks = payload + if not isinstance(raw_tasks, list): + continue + + for row in raw_tasks: + if not isinstance(row, dict): + continue + question = str(row.get("question", "")).strip() + if not question: + continue + key = question.lower() + if key in seen_questions: + continue + seen_questions.add(key) + answer = str(row.get("answer", "")).strip() or self._infer_answer_from_question(question, graph) + task_type = str(row.get("task_type", "llm_generated")).strip() or "llm_generated" + support_specs = self._normalize_edge_candidates(row.get("supporting_edges")) + if support_specs: + support = [Edge(e.src, e.rel, e.dst, e.confidence) for e in support_specs] + else: + support = self._infer_support_edges(question, answer, graph) + llm_tasks.append( + TaskInstance( + task_id=f"task_{start_idx + len(llm_tasks)}", + task_type=task_type, + question=question, + answer=answer, + supporting_edges=support, + metadata=self._task_metadata( + start_idx + len(llm_tasks), + task_type, + {"generated_by": "llm", "shared_context": True}, + ), + ) + ) + if len(llm_tasks) >= count: + break + if len(llm_tasks) >= count: + break + + if len(llm_tasks) < count: + residual = count - len(llm_tasks) + residual_prompt = ( + "SEED_TASK_EXPANSION_AGENT\n" + "SHARED_CONTEXT\n" + f"{shared_context}\n" + f"task_budget: {residual}\n" + "Generate additional tasks not already present in SHARED_CONTEXT.\n" + "Return STRICT JSON object: {\"tasks\": [{\"task_type\": str, \"question\": str, \"answer\": str, \"supporting_edges\": [{\"src\": str, \"rel\": str, \"dst\": str, \"confidence\": float}]}]}." 
def _metaqa_selected_records(self, count: int) -> list[MetaQATaskRecord]:
    """Sample up to `count` MetaQA records, balanced across hop labels.

    Buckets records by hop label, shuffles each bucket with the seeded RNG,
    takes an equal share per hop (sorted label order, so RNG consumption is
    deterministic), then tops up from the shuffled remainder. Returns the
    full record list unchanged when it already fits within the budget.
    """
    pool = list(self._metaqa_records)
    if not pool:
        return []
    if count <= 0 or len(pool) <= count:
        return pool

    by_hop: dict[str, list[MetaQATaskRecord]] = {}
    for rec in pool:
        by_hop.setdefault(rec.hop_label, []).append(rec)

    labels = sorted(by_hop)
    if not labels:
        return pool[:count]

    share = max(1, count // len(labels))
    chosen: list[MetaQATaskRecord] = []
    spare: list[MetaQATaskRecord] = []

    for label in labels:
        bucket = list(by_hop[label])
        self.rng.shuffle(bucket)
        cut = min(len(bucket), share)
        chosen.extend(bucket[:cut])
        spare.extend(bucket[cut:])

    shortfall = count - len(chosen)
    if shortfall > 0:
        self.rng.shuffle(spare)
        chosen.extend(spare[:shortfall])

    return chosen[:count]
f"{edge.src} {edge.rel} {edge.dst}", + "references": [edge.src, edge.dst], + "reference_names": [node_names.get(edge.src, edge.src), node_names.get(edge.dst, edge.dst)], + "mentions": [edge.dst], + "timestamp": 100000 + idx, + } + ) + + relation_groups: dict[str, list[Edge]] = {} + for edge in graph.edges: + relation_groups.setdefault(edge.rel, []).append(edge) + + forum_threads: list[dict] = [] + for idx, rel in enumerate(sorted(relation_groups.keys())[:200]): + group = relation_groups.get(rel, [])[:10] + forum_threads.append( + { + "thread_id": f"thr_metaqa_{idx}", + "topic": rel, + "author_id": group[0].src if group else "metaqa", + "comments": [ + { + "user_id": edge.src, + "text": f"{edge.src} {edge.rel} {edge.dst}", + } + for edge in group + ], + "references": [edge.dst for edge in group], + "discusses": [edge.dst for edge in group], + } + ) + + neighbors: dict[str, set[str]] = {} + for edge in graph.edges: + neighbors.setdefault(edge.src, set()).add(edge.dst) + neighbors.setdefault(edge.dst, set()).add(edge.src) + + profiles: list[dict] = [] + for node_id in sorted(graph.nodes.keys()): + node = graph.nodes[node_id] + profiles.append( + { + "user_id": node_id, + "name": str((node.attrs or {}).get("name") or node_id), + "org": str(node.node_type.value), + "org_id": str(node.node_type.value), + "location": "metaqa", + "location_id": "metaqa", + "alias_ids": [], + "connections": sorted(neighbors.get(node_id, set()))[:8], + "work_history": [str(node.node_type.value)], + } + ) + + return PlatformViews( + microblog_posts=microblog_posts, + forum_threads=forum_threads, + profiles=profiles, + alias_lookup={}, + ) + + def build_canonical_graph(self) -> CanonicalGraph: + if self._dataset_mode() == "metaqa": + root = Path(self.config.metaqa_root) + kb_path = Path(self.config.metaqa_kb_path) if str(self.config.metaqa_kb_path).strip() else None + graph, records = load_metaqa_dataset( + root=root, + kb_path=kb_path, + variant=self.config.metaqa_variant, + 
hops=list(self.config.metaqa_hops), + splits=list(self.config.metaqa_splits), + ) + self._metaqa_records = records + self._apply_seed_nodes(graph) + self._apply_seed_edges(graph) + return graph + + graph = CanonicalGraph() + orgs = ["Apex Dynamics", "Helios Labs", "Northbridge"] + locations = ["Bengaluru", "Pune", "Hyderabad", "Delhi"] + + for i in range(self.config.n_users): + uid = f"user_{i}" + org = self.rng.choice(orgs) + loc = self.rng.choice(locations) + graph.nodes[uid] = Node(uid, NodeType.USER, {"name": f"Person {i}", "org": org, "location": loc}) + org_id = f"org_{org.lower().replace(' ', '_')}" + loc_id = f"loc_{loc.lower()}" + graph.nodes.setdefault(org_id, Node(org_id, NodeType.ORG, {"name": org})) + graph.nodes.setdefault(loc_id, Node(loc_id, NodeType.LOCATION, {"name": loc})) + graph.edges.append(Edge(uid, "works_at", org_id)) + graph.edges.append(Edge(uid, "located_in", loc_id)) + + if self.rng.random() < self.config.alias_density: + alias = f"alias_{i}_{self.rng.randint(100,999)}" + graph.nodes[alias] = Node(alias, NodeType.ALIAS, {"handle": f"@{alias}"}) + graph.edges.append(Edge(alias, "alias_of", uid)) + + users = [n for n in graph.nodes.values() if n.node_type == NodeType.USER] + for _ in range(max(1, self.config.n_users // 2)): + a, b = self.rng.sample(users, 2) + graph.edges.append(Edge(a.node_id, "connected_to", b.node_id, confidence=0.8)) + + self._apply_seed_nodes(graph) + self._apply_seed_edges(graph) + + if self.config.seeding.llm_generate_remaining_graph: + llm_edges = self._llm_expand_graph(graph, self.config.seeding.llm_generated_edge_budget) + for edge in llm_edges: + self._add_edge_if_missing(graph, edge) + return graph + + def build_platform_views(self, graph: CanonicalGraph) -> PlatformViews: + if self._dataset_mode() == "metaqa": + return self._build_platform_views_metaqa(graph) + + users = [n for n in graph.nodes.values() if n.node_type == NodeType.USER] + aliases = [n for n in graph.nodes.values() if n.node_type == 
NodeType.ALIAS] + alias_owner = {e.src: e.dst for e in graph.edges if e.rel == "alias_of"} + user_aliases: dict[str, list[str]] = {} + for alias_id, user_id in alias_owner.items(): + user_aliases.setdefault(user_id, []).append(alias_id) + node_names = { + node_id: str((node.attrs or {}).get("name") or (node.attrs or {}).get("handle") or node_id) + for node_id, node in graph.nodes.items() + } + + microblog_posts: list[dict] = [] + for i, user in enumerate(users): + poster = user.node_id + if aliases and self.rng.random() < 0.45: + candidate = self.rng.choice(aliases).node_id + poster = candidate + text = f"Update {i} from {user.attrs['org']} #{user.attrs['location'].lower()}" + if self.rng.random() < self.config.noise_level: + text = f"Rumor: {text} maybe fake" + microblog_posts.append( + { + "post_id": f"post_{i}", + "user_id": poster, + "canonical_user": alias_owner.get(poster, user.node_id), + "text": text, + "references": [], + "reference_names": [], + "mentions": [f"user_{self.rng.randint(0, self.config.n_users - 1)}"], + "timestamp": 1000 + i, + } + ) + + authored_posts: dict[str, str] = {} + post_references: dict[str, list[str]] = {} + for edge in graph.edges: + if edge.rel == "authored_post": + authored_posts[edge.dst] = edge.src + elif edge.rel == "references" and edge.src.startswith("post_"): + post_references.setdefault(edge.src, []).append(edge.dst) + + for post_id, author_id in authored_posts.items(): + refs = post_references.get(post_id, []) + ref_names = [node_names.get(ref, ref) for ref in refs] + author_label = node_names.get(author_id, author_id) + text_parts = [f"{post_id} update from {author_label}"] + if ref_names: + text_parts.append("references " + ", ".join(ref_names)) + if refs: + text_parts.append("ids " + ", ".join(refs)) + post_payload = { + "post_id": post_id, + "user_id": author_id, + "canonical_user": alias_owner.get(author_id, author_id), + "text": ". 
".join(text_parts), + "references": refs, + "reference_names": ref_names, + "mentions": [], + "timestamp": 5000 + len(microblog_posts), + } + existing_idx = next((idx for idx, row in enumerate(microblog_posts) if row["post_id"] == post_id), None) + if existing_idx is None: + microblog_posts.append(post_payload) + else: + microblog_posts[existing_idx] = post_payload + + forum_threads: list[dict] = [] + for i in range(max(8, self.config.n_users // 3)): + author = self.rng.choice(users).node_id + forum_threads.append( + { + "thread_id": f"thr_{i}", + "topic": self.rng.choice(["security", "startup", "ai", "infra"]), + "author_id": author, + "comments": [ + {"user_id": self.rng.choice(users).node_id, "text": "Following this."}, + {"user_id": self.rng.choice(users).node_id, "text": "Interesting link."}, + ], + "references": [], + "discusses": [], + } + ) + + authored_threads: dict[str, str] = {} + thread_refs: dict[str, list[str]] = {} + thread_discusses: dict[str, list[str]] = {} + for edge in graph.edges: + if edge.rel == "authored_thread": + authored_threads[edge.dst] = edge.src + elif edge.rel == "references" and edge.src.startswith(("thr_", "thread_")): + thread_refs.setdefault(edge.src, []).append(edge.dst) + elif edge.rel == "discusses" and edge.src.startswith(("thr_", "thread_")): + thread_discusses.setdefault(edge.src, []).append(edge.dst) + + for thread_id, author_id in authored_threads.items(): + node = graph.nodes.get(thread_id) + refs = thread_refs.get(thread_id, []) + discussed = thread_discusses.get(thread_id, []) + comments = [] + for ref in refs: + comments.append({"user_id": author_id, "text": f"Reference: {node_names.get(ref, ref)} ({ref})"}) + for item in discussed: + comments.append({"user_id": author_id, "text": f"Discusses: {node_names.get(item, item)} ({item})"}) + thread_payload = { + "thread_id": thread_id, + "topic": str((node.attrs or {}).get("topic", "seeded")) if node else "seeded", + "author_id": author_id, + "title": 
node_names.get(thread_id, thread_id), + "comments": comments, + "references": refs, + "discusses": discussed, + } + existing_idx = next((idx for idx, row in enumerate(forum_threads) if row["thread_id"] == thread_id), None) + if existing_idx is None: + forum_threads.append(thread_payload) + else: + forum_threads[existing_idx] = thread_payload + + profiles: list[dict] = [] + for user in users: + conns = [e.dst for e in graph.edges if e.src == user.node_id and e.rel == "connected_to"][:5] + org_id = next((e.dst for e in graph.edges if e.src == user.node_id and e.rel == "works_at"), "") + location_id = next((e.dst for e in graph.edges if e.src == user.node_id and e.rel == "located_in"), "") + profiles.append( + { + "user_id": user.node_id, + "name": user.attrs["name"], + "org": user.attrs["org"], + "org_id": org_id, + "location": user.attrs["location"], + "location_id": location_id, + "alias_ids": sorted(user_aliases.get(user.node_id, [])), + "connections": conns, + "work_history": [user.attrs["org"]], + } + ) + + for i in range(int(len(users) * self.config.red_herring_rate)): + profiles.append( + { + "user_id": f"noise_{i}", + "name": f"P{self.rng.randint(100,999)}", + "org": self.rng.choice(["Stealth Co", "Unknown Ventures"]), + "org_id": "", + "location": self.rng.choice(["Remote", "Unknown"]), + "location_id": "", + "alias_ids": [], + "connections": [], + "work_history": [], + } + ) + return PlatformViews(microblog_posts, forum_threads, profiles, alias_lookup=alias_owner) + + def generate_tasks(self, graph: CanonicalGraph, views: PlatformViews, count: int = 12) -> list[TaskInstance]: + if self._dataset_mode() == "metaqa": + metaqa_tasks = self._metaqa_tasks(graph=graph, count=max(1, count)) + if metaqa_tasks: + return metaqa_tasks + + tasks = self._seeded_tasks(graph) + target_count = max(1, count, len(tasks)) + + llm_budget = min( + max(0, self.config.seeding.llm_generated_task_budget), + max(0, target_count - len(tasks)), + ) + if 
self.config.seeding.llm_generate_remaining_tasks and llm_budget > 0: + tasks.extend(self._llm_generated_tasks(graph, count=llm_budget, start_idx=len(tasks))) + + if len(tasks) < target_count and self._template_fallback_allowed(): + tasks.extend(self._template_tasks(graph, count=target_count - len(tasks), start_idx=len(tasks))) + + if not tasks: + tasks.extend(self._template_tasks(graph, count=target_count, start_idx=0)) + + return tasks[:target_count] diff --git a/src/osint_env/data/metaqa.py b/src/osint_env/data/metaqa.py new file mode 100644 index 0000000000000000000000000000000000000000..478f0bfb8c97b70c3f01d70e55f2926de366e425 --- /dev/null +++ b/src/osint_env/data/metaqa.py @@ -0,0 +1,246 @@ +from __future__ import annotations + +from collections import deque +from dataclasses import dataclass +from pathlib import Path +import re +from typing import Iterable + +from osint_env.domain.models import CanonicalGraph, Edge, Node, NodeType + + +_TOPIC_PATTERN = re.compile(r"\[(.*?)\]") + + +@dataclass(slots=True) +class MetaQATaskRecord: + question: str + answers: list[str] + primary_answer: str + hop_label: str + hop_count: int + split: str + qtype: str + topic_entity: str + supporting_edges: list[Edge] + + +def _normalize_hop_label(value: str) -> str: + token = str(value or "").strip().lower().replace(" ", "") + if token in {"1", "1hop", "1-hop"}: + return "1-hop" + if token in {"2", "2hop", "2-hop"}: + return "2-hop" + if token in {"3", "3hop", "3-hop"}: + return "3-hop" + return "" + + +def _normalize_split(value: str) -> str: + token = str(value or "").strip().lower() + if token in {"train", "dev", "test"}: + return token + return "" + + +def _hop_count(label: str) -> int: + return int(label.split("-", 1)[0]) + + +def _extract_topic_entity(question: str) -> str: + match = _TOPIC_PATTERN.search(str(question)) + return match.group(1).strip() if match else "" + + +def _node_types_for_relation(rel: str) -> tuple[NodeType, NodeType]: + relation = str(rel or 
"").strip().lower() + src_type = NodeType.POST + if relation in {"directed_by", "written_by", "starred_actors"}: + return src_type, NodeType.USER + if relation == "release_year": + return src_type, NodeType.EVENT + if relation == "in_language": + return src_type, NodeType.LOCATION + if relation in {"has_genre", "has_tags", "has_imdb_votes"}: + return src_type, NodeType.ORG + return src_type, NodeType.USER + + +def _ensure_node(graph: CanonicalGraph, node_id: str, node_type: NodeType) -> None: + existing = graph.nodes.get(node_id) + if existing is not None: + return + graph.nodes[node_id] = Node(node_id=node_id, node_type=node_type, attrs={"name": node_id}) + + +def _read_non_empty_lines(path: Path) -> list[str]: + return [line.strip() for line in path.read_text(encoding="utf-8").splitlines() if line.strip()] + + +def _parse_kb_line(line: str) -> tuple[str, str, str] | None: + parts = [part.strip() for part in str(line).split("|", 2)] + if len(parts) != 3: + return None + src, rel, dst = parts + if not src or not rel or not dst: + return None + return src, rel, dst + + +def _undirected_adjacency(edges: Iterable[Edge]) -> dict[str, list[tuple[str, Edge]]]: + adj: dict[str, list[tuple[str, Edge]]] = {} + for edge in edges: + adj.setdefault(edge.src, []).append((edge.dst, edge)) + adj.setdefault(edge.dst, []).append((edge.src, edge)) + return adj + + +def _bfs_support_path( + topic_entity: str, + answer_candidates: list[str], + adjacency: dict[str, list[tuple[str, Edge]]], + max_depth: int, +) -> list[Edge]: + topic = str(topic_entity or "").strip() + if not topic or topic not in adjacency: + return [] + + answers = {item.strip() for item in answer_candidates if item.strip()} + if not answers: + return [] + + queue: deque[tuple[str, list[Edge]]] = deque([(topic, [])]) + visited_depth: dict[str, int] = {topic: 0} + + while queue: + node, path = queue.popleft() + depth = len(path) + if depth > max_depth: + continue + if node in answers and path: + return path + if depth 
== max_depth: + continue + for neighbor, edge in adjacency.get(node, []): + next_depth = depth + 1 + best = visited_depth.get(neighbor) + if best is not None and best <= next_depth: + continue + visited_depth[neighbor] = next_depth + queue.append((neighbor, path + [edge])) + return [] + + +def _infer_support_edges( + topic_entity: str, + answer_candidates: list[str], + adjacency: dict[str, list[tuple[str, Edge]]], + hop_count: int, +) -> list[Edge]: + for limit in (hop_count, hop_count + 1, hop_count + 2, max(4, hop_count + 3)): + path = _bfs_support_path(topic_entity, answer_candidates, adjacency, max_depth=max(1, limit)) + if path: + return path + return [] + + +def infer_metaqa_support_edges( + graph: CanonicalGraph, + topic_entity: str, + answer_candidates: list[str], + hop_count: int, +) -> list[Edge]: + adjacency = _undirected_adjacency(graph.edges) + return _infer_support_edges( + topic_entity=topic_entity, + answer_candidates=answer_candidates, + adjacency=adjacency, + hop_count=hop_count, + ) + + +def load_metaqa_dataset( + root: str | Path, + kb_path: str | Path | None, + variant: str, + hops: list[str], + splits: list[str], +) -> tuple[CanonicalGraph, list[MetaQATaskRecord]]: + root_path = Path(root) + if not root_path.exists(): + raise FileNotFoundError(f"MetaQA root not found: {root_path}") + + kb_file = Path(kb_path) if kb_path else root_path / "kb.txt" + if not kb_file.exists(): + raise FileNotFoundError(f"MetaQA KB file not found: {kb_file}") + + graph = CanonicalGraph() + seen_edges: set[tuple[str, str, str]] = set() + + for raw_line in _read_non_empty_lines(kb_file): + row = _parse_kb_line(raw_line) + if row is None: + continue + src, rel, dst = row + edge_key = (src, rel, dst) + if edge_key in seen_edges: + continue + seen_edges.add(edge_key) + src_type, dst_type = _node_types_for_relation(rel) + _ensure_node(graph, src, src_type) + _ensure_node(graph, dst, dst_type) + graph.edges.append(Edge(src=src, rel=rel, dst=dst, confidence=1.0)) + + 
hop_labels = [_normalize_hop_label(hop) for hop in hops] + hop_labels = [hop for hop in hop_labels if hop] + if not hop_labels: + hop_labels = ["1-hop", "2-hop", "3-hop"] + + split_labels = [_normalize_split(split) for split in splits] + split_labels = [split for split in split_labels if split] + if not split_labels: + split_labels = ["train", "dev", "test"] + + variant_token = str(variant or "vanilla").strip().lower() + if variant_token not in {"vanilla", "ntm"}: + variant_token = "vanilla" + + records: list[MetaQATaskRecord] = [] + for hop in hop_labels: + hop_dir = root_path / hop + for split in split_labels: + qa_path = hop_dir / variant_token / f"qa_{split}.txt" + if not qa_path.exists(): + continue + qa_lines = _read_non_empty_lines(qa_path) + + qtype_path = hop_dir / f"qa_{split}_qtype.txt" + qtypes = _read_non_empty_lines(qtype_path) if qtype_path.exists() else [] + + for idx, row in enumerate(qa_lines): + parts = row.split("\t") + if len(parts) < 2: + continue + question = parts[0].strip() + answer_blob = parts[1].strip() + answers = [item.strip() for item in answer_blob.split("|") if item.strip()] + if not question or not answers: + continue + + topic_entity = _extract_topic_entity(question) + qtype = qtypes[idx] if idx < len(qtypes) else "" + records.append( + MetaQATaskRecord( + question=question, + answers=answers, + primary_answer=answers[0], + hop_label=hop, + hop_count=_hop_count(hop), + split=split, + qtype=qtype, + topic_entity=topic_entity, + supporting_edges=[], + ) + ) + + return graph, records diff --git a/src/osint_env/domain/__init__.py b/src/osint_env/domain/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..09b80bffb5e77f4d52a066830e4d530fb9aa4b8e --- /dev/null +++ b/src/osint_env/domain/__init__.py @@ -0,0 +1,2 @@ +"""Core domain models.""" + diff --git a/src/osint_env/domain/models.py b/src/osint_env/domain/models.py new file mode 100644 index 
0000000000000000000000000000000000000000..ed7d9ac551dbd9feb34e5e618583b189306dfaac --- /dev/null +++ b/src/osint_env/domain/models.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from enum import Enum +from typing import Any + +from pydantic import BaseModel, ConfigDict, Field + + +class NodeType(str, Enum): + USER = "user" + ALIAS = "alias" + ORG = "org" + LOCATION = "location" + POST = "post" + THREAD = "thread" + EVENT = "event" + + +class ActionType(str, Enum): + CALL_TOOL = "CALL_TOOL" + ADD_EDGE = "ADD_EDGE" + ANSWER = "ANSWER" + + +@dataclass(slots=True) +class Node: + node_id: str + node_type: NodeType + attrs: dict[str, Any] = field(default_factory=dict) + + +@dataclass(slots=True) +class Edge: + src: str + rel: str + dst: str + confidence: float = 1.0 + + +@dataclass(slots=True) +class CanonicalGraph: + nodes: dict[str, Node] = field(default_factory=dict) + edges: list[Edge] = field(default_factory=list) + + +@dataclass(slots=True) +class ToolCall: + tool_name: str + args: dict[str, Any] + + +class Action(BaseModel): + """Structured action payload used by OpenEnv step().""" + + model_config = ConfigDict(extra="forbid") + + action_type: ActionType + payload: dict[str, Any] = Field(default_factory=dict) + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # Backward-compatible positional form: Action(action_type, payload) + if args: + if len(args) != 2: + raise TypeError("Action() accepts either keyword fields or 2 positional args") + if "action_type" in kwargs or "payload" in kwargs: + raise TypeError("Action() cannot mix positional and keyword fields") + kwargs["action_type"] = args[0] + kwargs["payload"] = args[1] + super().__init__(**kwargs) + + +class Observation(BaseModel): + """Typed observation payload returned by reset()/step()/state().""" + + model_config = ConfigDict(extra="forbid") + + tool_outputs: list[dict[str, Any]] = Field(default_factory=list) + graph_snapshot: dict[str, Any] = 
Field(default_factory=dict) + action_history: list[dict[str, Any]] = Field(default_factory=list) + task: dict[str, Any] = Field(default_factory=dict) + + +class Reward(BaseModel): + """Typed reward payload for structured reward accounting.""" + + model_config = ConfigDict(extra="forbid") + + value: float = 0.0 + components: dict[str, float] = Field(default_factory=dict) + + +@dataclass(slots=True) +class TaskInstance: + task_id: str + task_type: str + question: str + answer: str + supporting_edges: list[Edge] + metadata: dict[str, Any] = field(default_factory=dict) + + +@dataclass(slots=True) +class SeedNodeSpec: + node_id: str + node_type: NodeType | str + attrs: dict[str, Any] = field(default_factory=dict) + + +@dataclass(slots=True) +class SeedEdgeSpec: + src: str + rel: str + dst: str + confidence: float = 1.0 + + +@dataclass(slots=True) +class SeedQuestionSpec: + question: str + answer: str | None = None + task_type: str = "seeded" + supporting_edges: list[SeedEdgeSpec] = field(default_factory=list) + metadata: dict[str, Any] = field(default_factory=dict) + + +@dataclass(slots=True) +class SeedingConfig: + seeded_nodes: list[SeedNodeSpec] = field(default_factory=list) + seeded_edges: list[SeedEdgeSpec] = field(default_factory=list) + seeded_questions: list[SeedQuestionSpec] = field(default_factory=list) + llm_generate_remaining_graph: bool = True + llm_generate_remaining_tasks: bool = True + llm_generated_edge_budget: int = 6 + llm_generated_task_budget: int = 8 + llm_generation_parallel: bool = True + llm_generation_workers: int = 3 + llm_generation_retries: int = 2 + allow_template_fallback_on_llm_failure: bool = False + + +@dataclass(slots=True) +class SwarmConfig: + enabled: bool = False + max_agents: int = 3 + max_breadth: int = 2 + max_width: int = 2 + max_depth: int = 2 + planner_rounds: int = 2 + tools_per_agent: int = 1 + + +@dataclass(slots=True) +class SpawnRewardConfig: + lambda_parallel: float = 0.15 + lambda_finish: float = 0.20 + anneal: float = 
1.0 + max_parallel_hint: int = 3 + + +@dataclass(slots=True) +class LLMConfig: + provider: str = "mock" + model: str = "qwen3:2b" + temperature: float = 0.1 + max_tokens: int = 256 + timeout_seconds: int = 240 + ollama_base_url: str = "http://127.0.0.1:11434" + openai_base_url: str = "https://api.openai.com/v1" + openai_api_key_env: str = "OPENAI_API_KEY" + openai_api_key: str = "" + + +@dataclass(slots=True) +class EnvironmentConfig: + n_users: int = 40 + alias_density: float = 0.35 + noise_level: float = 0.15 + red_herring_rate: float = 0.1 + max_steps: int = 18 + seed: int = 7 + dataset_mode: str = "canonical" + metaqa_root: str = "metaQA" + metaqa_kb_path: str = "" + metaqa_variant: str = "vanilla" + metaqa_hops: list[str] = field(default_factory=lambda: ["1-hop", "2-hop", "3-hop"]) + metaqa_splits: list[str] = field(default_factory=lambda: ["train", "dev", "test"]) + seeding: SeedingConfig = field(default_factory=SeedingConfig) + swarm: SwarmConfig = field(default_factory=SwarmConfig) + spawn_reward: SpawnRewardConfig = field(default_factory=SpawnRewardConfig) + llm: LLMConfig = field(default_factory=LLMConfig) diff --git a/src/osint_env/env/__init__.py b/src/osint_env/env/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..afc94dfc868f270a67ff4cd803494cce2d15bdb8 --- /dev/null +++ b/src/osint_env/env/__init__.py @@ -0,0 +1,2 @@ +"""Environment package.""" + diff --git a/src/osint_env/env/environment.py b/src/osint_env/env/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e4f1cedf69906221fffaf3c39eff821453fbd1 --- /dev/null +++ b/src/osint_env/env/environment.py @@ -0,0 +1,260 @@ +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +from osint_env.data.generator import DatasetGenerator +from osint_env.domain.models import Action, ActionType, Edge, EnvironmentConfig, Observation, TaskInstance +from osint_env.env.openenv_compat 
import Env
from osint_env.env.reward import (
    build_reward_model,
    compute_answer_reward,
    compute_edge_reward,
    compute_graph_f1,
)
from osint_env.memory.store import MemoryGraph, SemanticMemory
from osint_env.platforms.tools import ToolRegistry

if TYPE_CHECKING:
    from osint_env.llm.interface import LLMClient


@dataclass(slots=True)
class EpisodeState:
    """Mutable per-episode bookkeeping for the active task."""

    task: TaskInstance
    task_index: int = 0
    difficulty: str = "hard"
    step_count: int = 0
    done: bool = False
    total_reward: float = 0.0
    tool_calls: int = 0
    redundant_tool_calls: int = 0
    action_history: list[dict[str, Any]] = field(default_factory=list)
    tool_outputs: list[dict[str, Any]] = field(default_factory=list)
    answer: str | None = None
    call_fingerprints: set[str] = field(default_factory=set)
    reward_components: dict[str, float] = field(default_factory=dict)


class OSINTEnvironment(Env):
    """OpenEnv-compatible environment over a generated OSINT graph."""

    def __init__(self, config: EnvironmentConfig, llm: "LLMClient | None" = None):
        super().__init__(
            name="OSINTEnvironment",
            state_space="json-observation",
            action_space=["CALL_TOOL", "ADD_EDGE", "ANSWER"],
            episode_max_length=config.max_steps,
        )
        self.config = config
        self.generator = DatasetGenerator(config, llm=llm)
        self.graph = self.generator.build_canonical_graph()
        self.views = self.generator.build_platform_views(self.graph)
        self.tasks = self.generator.generate_tasks(self.graph, self.views, count=24)
        self.reward_model = build_reward_model(self.graph)
        self.tools = ToolRegistry(self.views)
        self.memory_graph = MemoryGraph()
        self.semantic_memory = SemanticMemory()
        self._task_idx = 0
        self.state: EpisodeState | None = None

    @staticmethod
    def _normalize_difficulty(value: str) -> str:
        """Map free-form difficulty tokens onto {easy, medium, hard}; default hard."""
        token = str(value or "").strip().lower()
        if token in {"easy", "e"}:
            return "easy"
        if token in {"mid", "medium", "m"}:
            return "medium"
        if token in {"high", "hard", "h"}:
            return "hard"
        return "hard"

    def _resolve_task_difficulty(self, task: TaskInstance, task_index: int) -> str:
        """Use task metadata when present, otherwise bucket by task index (10/10/rest)."""
        metadata = dict(task.metadata or {})
        if "difficulty" in metadata:
            return self._normalize_difficulty(str(metadata.get("difficulty", "")))
        if task_index < 10:
            return "easy"
        if task_index < 20:
            return "medium"
        return "hard"

    def reset(self) -> Observation:
        """Start a new episode on the next task (round-robin) and return the first observation."""
        task_index = self._task_idx % len(self.tasks)
        task = self.tasks[task_index]
        self._task_idx += 1
        self.state = EpisodeState(
            task=task,
            task_index=task_index,
            difficulty=self._resolve_task_difficulty(task, task_index),
        )
        # Fresh memory per episode so prior episodes cannot leak evidence.
        self.memory_graph = MemoryGraph()
        self.semantic_memory = SemanticMemory()
        return self._observation()

    def step(self, action: Action) -> tuple[Observation, float, bool, dict[str, Any]]:
        """Apply one action; returns (observation, reward, done, info)."""
        if self.state is None:
            raise RuntimeError("Call reset() before step().")
        if self.state.done:
            return self._observation(), 0.0, True, self._info()

        self.state.step_count += 1
        reward = 0.0

        if action.action_type == ActionType.CALL_TOOL:
            reward += self._handle_tool(action.payload)
        elif action.action_type == ActionType.ADD_EDGE:
            reward += self._handle_add_edge(action.payload)
        elif action.action_type == ActionType.ANSWER:
            reward += self._handle_answer(action.payload)
        else:
            reward -= 0.5

        # Time-limit termination carries a small penalty.
        if self.state.step_count >= self.config.max_steps and not self.state.done:
            self.state.done = True
            reward -= 0.3

        self.state.total_reward += reward
        self.state.action_history.append({"type": action.action_type.value, "payload": action.payload, "reward": reward})
        return self._observation(), reward, self.state.done, self._info()

    def _handle_tool(self, payload: dict[str, Any]) -> float:
        """Execute a tool call; reward novel calls and penalize redundant ones."""
        if self.state is None:
            return 0.0
        tool_name = payload["tool_name"]
        args = payload.get("args", {})
        # Fingerprint on sorted args so argument order does not defeat dedup.
        fp = f"{tool_name}:{sorted(args.items())}"
        self.state.tool_calls += 1
        if fp in self.state.call_fingerprints:
            self.state.redundant_tool_calls += 1
            penalty = -0.2
        else:
            penalty = 0.05
            self.state.call_fingerprints.add(fp)

invalid_tool_penalty = 0.0 + try: + if tool_name == "search_memory": + query = str(args.get("query", "")).strip() + top_k = int(args.get("k", 5)) if str(args.get("k", "")).strip() else 5 + results = self.semantic_memory.search(query=query, k=max(1, top_k)) if query else [] + output = {"results": results, "count": len(results)} + else: + output = self.tools.call(tool_name, args) + except Exception as exc: + output = {"error": str(exc)} + invalid_tool_penalty = -0.25 + self.state.tool_outputs.append({"tool": tool_name, "args": args, "output": output}) + self.semantic_memory.add(f"{tool_name} {args} {output}", {"tool": tool_name}) + relevance_bonus = 0.08 * self._tool_relevance(self.state.task, output) + total = penalty + relevance_bonus + invalid_tool_penalty + self._accumulate_reward_components( + { + "tool_novelty": penalty, + "tool_relevance": relevance_bonus, + "invalid_tool_penalty": invalid_tool_penalty, + } + ) + return total + + def _handle_add_edge(self, payload: dict[str, Any]) -> float: + if self.state is None: + return 0.0 + edge = Edge(payload["src"], payload["rel"], payload["dst"], float(payload.get("confidence", 1.0))) + existing_edges = list(self.memory_graph.edges) + added = self.memory_graph.add_edge(edge) + if not added: + self._accumulate_reward_components({"duplicate_edge_penalty": -0.15}) + return -0.15 + + breakdown = compute_edge_reward( + edge=edge, + task=self.state.task, + existing_edges=existing_edges, + step_count=self.state.step_count, + model=self.reward_model, + graph=self.graph, + difficulty=self.state.difficulty, + ) + self._accumulate_reward_components(breakdown.to_dict()) + return breakdown.total + + def _handle_answer(self, payload: dict[str, Any]) -> float: + if self.state is None: + return 0.0 + proposed = str(payload.get("answer", "")).strip() + self.state.answer = proposed + self.state.done = True + breakdown = compute_answer_reward( + proposed_answer=proposed, + task=self.state.task, + pred_edges=self.memory_graph.edges, + 
tool_outputs=self.state.tool_outputs, + step_count=self.state.step_count, + model=self.reward_model, + difficulty=self.state.difficulty, + ) + self._accumulate_reward_components(breakdown.to_dict()) + return breakdown.total + + def _tool_relevance(self, task: TaskInstance, output: dict[str, Any]) -> float: + haystack = str(output).lower() + clues = {task.answer.lower()} + for edge in task.supporting_edges: + clues.add(edge.src.lower()) + clues.add(edge.dst.lower()) + clues.add(edge.rel.lower()) + if not clues: + return 0.0 + matches = sum(1 for token in clues if token in haystack) + return matches / len(clues) + + def _accumulate_reward_components(self, values: dict[str, float]) -> None: + if self.state is None: + return + for key, value in values.items(): + self.state.reward_components[key] = self.state.reward_components.get(key, 0.0) + float(value) + + def _observation(self) -> Observation: + if self.state is None: + raise RuntimeError("State is not initialized.") + metadata = dict(self.state.task.metadata or {}) + grader = metadata.get("grader") if isinstance(metadata.get("grader"), dict) else None + task_payload = { + "task_id": self.state.task.task_id, + "task_type": self.state.task.task_type, + "question": self.state.task.question, + "difficulty": self.state.difficulty, + "grader": ( + dict(grader) + if grader is not None + else { + "type": "difficulty_exact_match", + "answer_type": "node_id", + "case_sensitive": True, + "reward_profile": self.state.difficulty, + } + ), + } + if "scenario" in metadata: + task_payload["scenario"] = str(metadata.get("scenario", "")) + return Observation( + tool_outputs=self.state.tool_outputs[-5:], + graph_snapshot=self.memory_graph.to_snapshot(), + action_history=self.state.action_history[-10:], + task=task_payload, + ) + + def _info(self) -> dict[str, Any]: + if self.state is None: + return {} + return { + "step_count": self.state.step_count, + "difficulty": self.state.difficulty, + "task_index": self.state.task_index, + 
"total_reward": self.state.total_reward, + "tool_calls": self.state.tool_calls, + "redundant_tool_calls": self.state.redundant_tool_calls, + "task_answer": self.state.task.answer, + "agent_answer": self.state.answer, + "graph_f1": compute_graph_f1(self.memory_graph.edges, self.state.task.supporting_edges), + "reward_components": dict(self.state.reward_components), + } diff --git a/src/osint_env/env/openenv_compat.py b/src/osint_env/env/openenv_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..46f2bfd007c092e38e5c636215a0ca17faa9b541 --- /dev/null +++ b/src/osint_env/env/openenv_compat.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +try: + from openenv.env import Env +except ImportError: + class Env: + """Minimal fallback used when openenv is not installed locally.""" + + def __init__( + self, + name: str, + state_space: str, + action_space: list[str], + episode_max_length: int, + ) -> None: + self.name = name + self.state_space = state_space + self.action_space = action_space + self.episode_max_length = episode_max_length + diff --git a/src/osint_env/env/reward.py b/src/osint_env/env/reward.py new file mode 100644 index 0000000000000000000000000000000000000000..7a0b824daddd2ed696bc6af411b727d7a8c950e0 --- /dev/null +++ b/src/osint_env/env/reward.py @@ -0,0 +1,483 @@ +from __future__ import annotations + +import json +import math +import re +from collections import Counter +from dataclasses import asdict, dataclass + +from osint_env.domain.models import CanonicalGraph, Edge, TaskInstance + + +@dataclass(slots=True) +class RewardModel: + relation_idf: dict[str, float] + max_relation_idf: float + hub_penalty: dict[str, float] + max_hub_penalty: float + type_priors: dict[tuple[str, str, str], float] + + +@dataclass(slots=True) +class EdgeRewardBreakdown: + total: float + global_accuracy: float + soft_shaping: float + efficiency: float + diversity: float + relation_informativeness: float + entity_informativeness: float + 
connectivity_gain: float + + def to_dict(self) -> dict[str, float]: + return asdict(self) + + +@dataclass(slots=True) +class AnswerRewardBreakdown: + total: float + format_reward: float + correctness: float + knowledge_carrier: float + knowledge_indexing: float + connectivity: float + graph_f1: float + efficiency: float + compactness: float + relation_informativeness: float + entity_informativeness: float + repetition_penalty: float + + def to_dict(self) -> dict[str, float]: + return asdict(self) + + +def _normalize_difficulty(value: str) -> str: + token = str(value or "").strip().lower() + if token in {"easy", "e"}: + return "easy" + if token in {"mid", "medium", "m"}: + return "medium" + if token in {"high", "hard", "h"}: + return "hard" + return "hard" + + +def build_reward_model(graph: CanonicalGraph) -> RewardModel: + relation_freq: Counter[str] = Counter(e.rel for e in graph.edges) + total_edges = max(1, len(graph.edges)) + relation_idf = { + rel: math.log((1.0 + total_edges) / (1.0 + freq)) + 1.0 for rel, freq in relation_freq.items() + } + max_relation_idf = max(relation_idf.values()) if relation_idf else 1.0 + + degree: Counter[str] = Counter() + for edge in graph.edges: + degree[edge.src] += 1 + degree[edge.dst] += 1 + hub_penalty = {node_id: math.log(1.0 + deg) for node_id, deg in degree.items()} + max_hub_penalty = max(hub_penalty.values()) if hub_penalty else 1.0 + + type_counts: Counter[tuple[str, str, str]] = Counter() + rel_counts: Counter[str] = Counter() + for edge in graph.edges: + src = graph.nodes.get(edge.src) + dst = graph.nodes.get(edge.dst) + if src is None or dst is None: + continue + key = (str(src.node_type.value), edge.rel, str(dst.node_type.value)) + type_counts[key] += 1 + rel_counts[edge.rel] += 1 + type_priors = { + key: count / max(1, rel_counts[key[1]]) for key, count in type_counts.items() + } + + return RewardModel( + relation_idf=relation_idf, + max_relation_idf=max_relation_idf, + hub_penalty=hub_penalty, + 
max_hub_penalty=max_hub_penalty, + type_priors=type_priors, + ) + + +def edge_in_truth(edge: Edge, task: TaskInstance) -> bool: + return any(e.src == edge.src and e.rel == edge.rel and e.dst == edge.dst for e in task.supporting_edges) + + +def _cosine(a: Counter[str], b: Counter[str]) -> float: + common = set(a) & set(b) + num = sum(a[t] * b[t] for t in common) + den = math.sqrt(sum(v * v for v in a.values())) * math.sqrt(sum(v * v for v in b.values())) + return (num / den) if den else 0.0 + + +def _edge_signature(edge: Edge) -> Counter[str]: + # Approximate path/edge embedding using relation and endpoint prefixes. + src_prefix = edge.src.split("_", 1)[0] + dst_prefix = edge.dst.split("_", 1)[0] + return Counter({f"rel:{edge.rel}": 2, f"src:{src_prefix}": 1, f"dst:{dst_prefix}": 1}) + + +def _soft_fact_score(edge: Edge, model: RewardModel, graph: CanonicalGraph) -> float: + if any(e.src == edge.src and e.rel == edge.rel and e.dst == edge.dst for e in graph.edges): + return 1.0 + + src = graph.nodes.get(edge.src) + dst = graph.nodes.get(edge.dst) + if src is None or dst is None: + return 0.0 + + type_key = (str(src.node_type.value), edge.rel, str(dst.node_type.value)) + prior = model.type_priors.get(type_key, 0.0) + + # A tiny domain heuristic: alias links are common and worth soft credit even without exact support edge. 
+ alias_bias = 0.2 if (edge.rel == "alias_of" and edge.src.startswith("alias_") and edge.dst.startswith("user_")) else 0.0 + relation_exists = any(e.rel == edge.rel for e in graph.edges) + relation_bonus = 0.1 if relation_exists else 0.0 + return max(0.0, min(1.0, 0.1 + (0.65 * prior) + alias_bias + relation_bonus)) + + +def _normalized_relation_info(rel: str, model: RewardModel) -> float: + idf = model.relation_idf.get(rel, 1.0) + return idf / max(1e-6, model.max_relation_idf) + + +def _normalized_entity_info(src: str, dst: str, model: RewardModel) -> float: + src_h = model.hub_penalty.get(src, 0.0) + dst_h = model.hub_penalty.get(dst, 0.0) + mean_hub = (src_h + dst_h) / 2.0 + # UniRel-style preference for low-degree intermediates: lower hub penalty -> higher informativeness. + return 1.0 - (mean_hub / max(1e-6, model.max_hub_penalty)) + + +def _is_reachable_undirected(edges: list[Edge], src: str, dst: str) -> bool: + if src == dst: + return True + adj: dict[str, set[str]] = {} + for edge in edges: + adj.setdefault(edge.src, set()).add(edge.dst) + adj.setdefault(edge.dst, set()).add(edge.src) + seen = {src} + stack = [src] + while stack: + node = stack.pop() + for nxt in adj.get(node, set()): + if nxt == dst: + return True + if nxt not in seen: + seen.add(nxt) + stack.append(nxt) + return False + + +def _connectivity_gain(edge: Edge, existing_edges: list[Edge]) -> float: + # Reward edges that bridge disconnected regions and penalize already-connected shortcuts. 
+ if edge.src == edge.dst: + return -0.06 + already_connected = _is_reachable_undirected(existing_edges, edge.src, edge.dst) + if already_connected: + return -0.03 + return 0.10 + + +def _sigmoid_temperature(value: float, temperature: float = 2.0) -> float: + scaled = float(value) / max(1e-6, float(temperature)) + if scaled >= 0: + z = math.exp(-scaled) + return 1.0 / (1.0 + z) + z = math.exp(scaled) + return z / (1.0 + z) + + +def compute_edge_reward( + edge: Edge, + task: TaskInstance, + existing_edges: list[Edge], + step_count: int, + model: RewardModel, + graph: CanonicalGraph, + difficulty: str = "hard", +) -> EdgeRewardBreakdown: + in_truth = edge_in_truth(edge, task) + difficulty_level = _normalize_difficulty(difficulty) + + # DeepPath-inspired global accuracy term. + global_accuracy = 0.85 if in_truth else -0.55 + + # D18 reward shaping: R = Rb + (1 - Rb) * f, where f is a soft fact plausibility score. + base_reward = 1.0 if in_truth else 0.0 + shaped = base_reward + ((1.0 - base_reward) * _soft_fact_score(edge, model, graph)) + soft_shaping = 0.30 * (shaped - 0.5) + + # DeepPath-inspired efficiency term: earlier useful edges are better. + efficiency = 0.10 * (1.0 / max(1, step_count)) + + # DeepPath-inspired diversity term: discourage repeated edge patterns. + if not existing_edges: + diversity = 0.08 + else: + new_sig = _edge_signature(edge) + avg_similarity = sum(_cosine(new_sig, _edge_signature(e)) for e in existing_edges) / len(existing_edges) + novelty = 1.0 - avg_similarity + diversity = 0.14 * (novelty - 0.5) + + # UniRel-style informativeness terms. + relation_informativeness = 0.12 * (_normalized_relation_info(edge.rel, model) - 0.5) + entity_informativeness = 0.12 * (_normalized_entity_info(edge.src, edge.dst, model) - 0.5) + + # Additional structural utility shaping for KG construction. 
+ connectivity_gain = _connectivity_gain(edge, existing_edges) + + if difficulty_level == "easy": + global_accuracy = 0.75 if in_truth else -0.45 + soft_shaping = 0.0 + diversity = 0.0 + relation_informativeness = 0.0 + entity_informativeness = 0.0 + connectivity_gain = 0.0 + efficiency = 0.15 * (1.0 / max(1, step_count)) + elif difficulty_level == "medium": + diversity = 0.0 + relation_informativeness = 0.0 + entity_informativeness = 0.0 + + raw_total = ( + global_accuracy + + soft_shaping + + efficiency + + diversity + + relation_informativeness + + entity_informativeness + + connectivity_gain + ) + total = _sigmoid_temperature(raw_total, temperature=2.0) + return EdgeRewardBreakdown( + total=total, + global_accuracy=global_accuracy, + soft_shaping=soft_shaping, + efficiency=efficiency, + diversity=diversity, + relation_informativeness=relation_informativeness, + entity_informativeness=entity_informativeness, + connectivity_gain=connectivity_gain, + ) + + +def _connectivity_ratio(pred_edges: list[Edge], task: TaskInstance) -> float: + nodes = {e.src for e in task.supporting_edges} | {e.dst for e in task.supporting_edges} + if len(nodes) <= 1: + return 1.0 + + adj: dict[str, set[str]] = {} + for edge in pred_edges: + adj.setdefault(edge.src, set()).add(edge.dst) + adj.setdefault(edge.dst, set()).add(edge.src) + + start = next(iter(nodes)) + seen = {start} + stack = [start] + while stack: + cur = stack.pop() + for nxt in adj.get(cur, set()): + if nxt not in seen: + seen.add(nxt) + stack.append(nxt) + return len(seen & nodes) / max(1, len(nodes)) + + +def _knowledge_indexing_recall(task: TaskInstance, tool_outputs: list[dict[str, object]]) -> float: + gold_terms = {task.answer.lower()} + for edge in task.supporting_edges: + gold_terms.add(edge.src.lower()) + gold_terms.add(edge.dst.lower()) + gold_terms.add(edge.rel.lower()) + + serialized = json.dumps(tool_outputs).lower() + covered = sum(1 for term in gold_terms if term and term in serialized) + return covered / 
max(1, len(gold_terms)) + + +def _knowledge_carrier_reward(pred_edges: list[Edge], task: TaskInstance) -> float: + pred = {(e.src, e.rel, e.dst) for e in pred_edges} + truth = {(e.src, e.rel, e.dst) for e in task.supporting_edges} + deducible = bool(truth & pred) + return 0.4 if deducible else -0.2 + + +def _extract_query_entities(question: str) -> set[str]: + pattern = r"\b(?:alias|user|org|loc|post|thr|thread|event)_[a-zA-Z0-9_]+\b" + return set(re.findall(pattern, question)) + + +def _max_connected_seed_count(pred_edges: list[Edge], seeds: set[str]) -> int: + if not seeds: + return 0 + adj: dict[str, set[str]] = {} + for edge in pred_edges: + adj.setdefault(edge.src, set()).add(edge.dst) + adj.setdefault(edge.dst, set()).add(edge.src) + + best = 1 + for seed in seeds: + seen = {seed} + stack = [seed] + while stack: + cur = stack.pop() + for nxt in adj.get(cur, set()): + if nxt not in seen: + seen.add(nxt) + stack.append(nxt) + connected_seed_count = len(seeds & seen) + best = max(best, connected_seed_count) + return best + + +def _unirel_connectivity_score(pred_edges: list[Edge], seeds: set[str]) -> float: + # UniRel-style discrete connectivity range projected to [-1, 1] for stable weighting. 
def _subgraph_relation_informativeness(pred_edges: list[Edge], model: RewardModel | None) -> float:
    """Mean normalized relation informativeness over predicted edges, centered on 0.

    Returns 0.0 when there are no predictions or no reward model; otherwise a
    value in [-0.5, 0.5] (mean normalized score minus the 0.5 midpoint).
    """
    if not pred_edges or model is None:
        return 0.0
    total = 0.0
    for edge in pred_edges:
        total += _normalized_relation_info(edge.rel, model)
    return (total / len(pred_edges)) - 0.5


def _subgraph_entity_informativeness(pred_edges: list[Edge], model: RewardModel | None) -> float:
    """Mean normalized entity informativeness over edge endpoints, centered on 0.

    Same contract as the relation variant: 0.0 without predictions or model,
    otherwise the centered mean in [-0.5, 0.5].
    """
    if not pred_edges or model is None:
        return 0.0
    scores = [_normalized_entity_info(edge.src, edge.dst, model) for edge in pred_edges]
    return (sum(scores) / len(scores)) - 0.5


def _relation_repetition_ratio(pred_edges: list[Edge]) -> float:
    """Fraction of predicted edges whose relation label repeats an earlier one.

    0.0 for zero or one edge; 1 - unique/total otherwise (so all-distinct
    relations yield 0.0 and a single repeated relation approaches 1.0).
    """
    if len(pred_edges) <= 1:
        return 0.0
    labels = [edge.rel for edge in pred_edges]
    return 1.0 - (len(set(labels)) / len(labels))


def _deducible_answer(proposed_answer: str, task: TaskInstance, pred_edges: list[Edge]) -> bool:
    """Whether a correct answer is actually supported by the predicted subgraph.

    True only when the answer matches AND either (a) at least one ground-truth
    supporting triple appears among the predictions, or (b) some query entity
    reaches the answer through the predicted edges (undirected).
    """
    if proposed_answer != task.answer:
        return False
    predicted_triples = {(edge.src, edge.rel, edge.dst) for edge in pred_edges}
    if any((edge.src, edge.rel, edge.dst) in predicted_triples for edge in task.supporting_edges):
        return True

    query_entities = _extract_query_entities(task.question)
    if not query_entities:
        return False
    return any(_is_reachable_undirected(pred_edges, entity, proposed_answer) for entity in query_entities)


def compute_answer_reward(
    proposed_answer: str,
    task: TaskInstance,
    pred_edges: list[Edge],
    tool_outputs: list[dict[str, object]],
    step_count: int,
    model: RewardModel | None = None,
    difficulty: str = "hard",
) -> AnswerRewardBreakdown:
    """Score a final answer plus its predicted evidence subgraph.

    Combines AutoGraph-R1 style task-utility terms (knowledge carrier /
    indexing), UniRel-style seed connectivity, graph F1, efficiency and
    compactness shaping, then squashes the raw sum through a temperature-2
    sigmoid. Component weights depend on the normalized difficulty level.

    Returns:
        AnswerRewardBreakdown with the squashed total and every component.
    """
    level = _normalize_difficulty(difficulty)

    # Terms shared by every difficulty level.
    format_reward = 0.15 if proposed_answer else -0.55
    correctness = 1.15 if proposed_answer == task.answer else -1.0

    # All helpers below are pure/deterministic, so each is evaluated once and
    # reused across the difficulty branches.
    indexing_recall = _knowledge_indexing_recall(task, tool_outputs)
    step_factor = 1.0 / max(1, step_count)
    surplus_edges = max(0, len(pred_edges) - len(task.supporting_edges))

    # UniRel-style connectivity is measured over query entities plus the answer.
    seed_entities = _extract_query_entities(task.question)
    seed_entities.add(task.answer)

    if level == "easy":
        # Easy mode: reward only answering plus light retrieval/efficiency shaping.
        knowledge_carrier = 0.0
        knowledge_indexing = 0.25 * indexing_recall
        connectivity = 0.0
        graph_f1 = 0.0
        efficiency = 0.18 * step_factor
        compactness = 0.0
        relation_informativeness = 0.0
        entity_informativeness = 0.0
        repetition_penalty = 0.0
    elif level == "medium":
        # Medium mode: structural terms enabled at reduced weights, no
        # informativeness/repetition shaping yet.
        knowledge_carrier = 0.50 if _deducible_answer(proposed_answer, task, pred_edges) else -0.25
        knowledge_indexing = 0.45 * indexing_recall
        connectivity = 0.18 * _unirel_connectivity_score(pred_edges, seed_entities)
        graph_f1 = 0.35 * compute_graph_f1(pred_edges, task.supporting_edges)
        efficiency = 0.12 * step_factor
        compactness = -0.04 * surplus_edges
        relation_informativeness = 0.0
        entity_informativeness = 0.0
        repetition_penalty = 0.0
    else:
        # Hard (default) mode: full decomposition.
        knowledge_carrier = 0.50 if _deducible_answer(proposed_answer, task, pred_edges) else -0.25
        knowledge_indexing = 0.45 * indexing_recall
        connectivity = 0.30 * _unirel_connectivity_score(pred_edges, seed_entities)
        graph_f1 = 0.55 * compute_graph_f1(pred_edges, task.supporting_edges)
        efficiency = 0.12 * step_factor
        compactness = -0.05 * surplus_edges
        relation_informativeness = 0.12 * _subgraph_relation_informativeness(pred_edges, model)
        entity_informativeness = 0.12 * _subgraph_entity_informativeness(pred_edges, model)
        # AutoGraph-R1 repetition control variant used in larger models.
        repetition_penalty = -0.10 * _relation_repetition_ratio(pred_edges)

    raw_total = (
        format_reward
        + correctness
        + knowledge_carrier
        + knowledge_indexing
        + connectivity
        + graph_f1
        + efficiency
        + compactness
        + relation_informativeness
        + entity_informativeness
        + repetition_penalty
    )
    total = _sigmoid_temperature(raw_total, temperature=2.0)
    return AnswerRewardBreakdown(
        total=total,
        format_reward=format_reward,
        correctness=correctness,
        knowledge_carrier=knowledge_carrier,
        knowledge_indexing=knowledge_indexing,
        connectivity=connectivity,
        graph_f1=graph_f1,
        efficiency=efficiency,
        compactness=compactness,
        relation_informativeness=relation_informativeness,
        entity_informativeness=entity_informativeness,
        repetition_penalty=repetition_penalty,
    )


def compute_graph_f1(pred_edges: list[Edge], truth_edges: list[Edge]) -> float:
    """Triple-level F1 between predicted and ground-truth edges.

    Edges are compared as exact (src, rel, dst) triples. Two empty sets score
    1.0 (vacuously perfect); exactly one empty set scores 0.0.
    """
    predicted = {(edge.src, edge.rel, edge.dst) for edge in pred_edges}
    expected = {(edge.src, edge.rel, edge.dst) for edge in truth_edges}
    if not predicted and not expected:
        return 1.0
    if not predicted or not expected:
        return 0.0
    overlap = len(predicted & expected)
    precision = overlap / len(predicted)
    recall = overlap / len(expected)
    denominator = precision + recall
    if not denominator:
        return 0.0
    return 2 * precision * recall / denominator
+ """ + if len(main_steps) != len(parallel_subagent_steps): + raise ValueError("main_steps and parallel_subagent_steps must have the same length") + + total = 0 + for stage_main, stage_sub in zip(main_steps, parallel_subagent_steps): + main = max(0, int(stage_main)) + longest_sub = max((max(0, int(v)) for v in stage_sub), default=0) + total += main + longest_sub + return total + + +def parl_style_spawn_reward( + task_outcome_reward: float, + spawn_count: int, + finished_subtasks: int, + critical_steps: int, + lambda_parallel: float = 0.15, + lambda_finish: float = 0.20, + anneal: float = 1.0, + breadth: int | None = None, + depth: int | None = None, + max_parallel_hint: int | None = None, +) -> float: + """Kimi K2.5 inspired PARL reward utility for future multi-agent branches. + + This helper intentionally does not orchestrate agents. It only exposes the reward shape: + + r_parl = r_perf + a * (lambda_parallel * r_parallel + lambda_finish * r_finish + r_latency) + + where: + - r_parallel encourages non-zero agent spawning (avoids serial collapse) + - r_finish rewards meaningful completion, preventing spawn-only reward hacking + - r_latency favors lower critical-step execution paths + + The optional breadth/depth controls are small shaping terms for future branches where + orchestration state includes tree shape telemetry. 
+ """ + spawn_count = max(0, int(spawn_count)) + finished_subtasks = max(0, int(finished_subtasks)) + critical_steps = max(1, int(critical_steps)) + anneal = max(0.0, min(1.0, anneal)) + lambda_parallel = max(0.0, float(lambda_parallel)) + lambda_finish = max(0.0, float(lambda_finish)) + breadth = max(0, int(breadth or 0)) + depth = max(0, int(depth or 0)) + max_parallel_hint = max(0, int(max_parallel_hint or 0)) + + breakdown = parl_reward_breakdown( + task_outcome_reward=task_outcome_reward, + spawn_count=spawn_count, + finished_subtasks=finished_subtasks, + critical_steps=critical_steps, + lambda_parallel=lambda_parallel, + lambda_finish=lambda_finish, + anneal=anneal, + breadth=breadth, + depth=depth, + max_parallel_hint=max_parallel_hint, + ) + return breakdown.total + + +def parl_reward_breakdown( + task_outcome_reward: float, + spawn_count: int, + finished_subtasks: int, + critical_steps: int, + lambda_parallel: float = 0.15, + lambda_finish: float = 0.20, + anneal: float = 1.0, + breadth: int | None = None, + depth: int | None = None, + max_parallel_hint: int | None = None, +) -> PARLRewardBreakdown: + spawn_count = max(0, int(spawn_count)) + finished_subtasks = max(0, int(finished_subtasks)) + critical_steps = max(1, int(critical_steps)) + anneal = max(0.0, min(1.0, anneal)) + lambda_parallel = max(0.0, float(lambda_parallel)) + lambda_finish = max(0.0, float(lambda_finish)) + breadth = max(0, int(breadth or 0)) + depth = max(0, int(depth or 0)) + max_parallel_hint = max(0, int(max_parallel_hint or 0)) + + if spawn_count == 0: + r_parallel = 0.0 + r_finish = 0.0 + else: + # Saturating incentive for parallelism so reward cannot grow unbounded with spawns. 
+ r_parallel = math.tanh(spawn_count / 4.0) + if max_parallel_hint > 0: + utilization = min(1.0, spawn_count / max_parallel_hint) + r_parallel *= (0.7 + (0.3 * utilization)) + + r_finish = min(1.0, finished_subtasks / spawn_count) + + if breadth > 0: + breadth_bonus = 0.04 * math.tanh(breadth / 6.0) + else: + breadth_bonus = 0.0 + + if depth > 0: + # Mild depth penalty discourages brittle over-decomposition chains. + depth_penalty = -0.03 * math.tanh(max(0, depth - 1) / 4.0) + else: + depth_penalty = 0.0 + + # Optional latency shaping hook using critical steps (higher is worse). + r_latency = 0.05 * (1.0 / critical_steps) + + auxiliary = ( + (lambda_parallel * r_parallel) + + (lambda_finish * r_finish) + + r_latency + + breadth_bonus + + depth_penalty + ) + total = float(task_outcome_reward) + (anneal * auxiliary) + return PARLRewardBreakdown( + total=total, + auxiliary=anneal * auxiliary, + parallel=r_parallel, + finish=r_finish, + latency=r_latency, + breadth_bonus=breadth_bonus, + depth_penalty=depth_penalty, + ) diff --git a/src/osint_env/eval/__init__.py b/src/osint_env/eval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0041203255773471e8b612ada5ed7a53245dac30 --- /dev/null +++ b/src/osint_env/eval/__init__.py @@ -0,0 +1,2 @@ +"""Evaluation package.""" + diff --git a/src/osint_env/eval/leaderboard.py b/src/osint_env/eval/leaderboard.py new file mode 100644 index 0000000000000000000000000000000000000000..b46a6852c736ca6e332366cb5ce77ab45e9f30e3 --- /dev/null +++ b/src/osint_env/eval/leaderboard.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +import json +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + + +def _utc_now() -> str: + return datetime.now(tz=timezone.utc).replace(microsecond=0).isoformat() + + +def load_leaderboard(path: str | Path) -> list[dict[str, Any]]: + file_path = Path(path) + if not file_path.exists(): + return [] + with file_path.open("r", 
encoding="utf-8") as f: + data = json.load(f) + if not isinstance(data, list): + return [] + return data + + +def save_leaderboard(path: str | Path, records: list[dict[str, Any]]) -> None: + file_path = Path(path) + file_path.parent.mkdir(parents=True, exist_ok=True) + with file_path.open("w", encoding="utf-8") as f: + json.dump(records, f, indent=2, sort_keys=True) + + +def _metric_value(record: dict[str, Any], sort_by: str) -> float: + metrics = record.get("metrics", {}) + return float(metrics.get(sort_by, 0.0)) + + +def sorted_leaderboard(records: list[dict[str, Any]], sort_by: str = "leaderboard_score") -> list[dict[str, Any]]: + return sorted(records, key=lambda r: _metric_value(r, sort_by), reverse=True) + + +def append_leaderboard_record( + path: str | Path, + summary: dict[str, Any], + episodes: int, + run_name: str | None = None, + config: dict[str, Any] | None = None, +) -> dict[str, Any]: + records = load_leaderboard(path) + run_id = f"run_{len(records) + 1:04d}" + record = { + "run_id": run_id, + "run_name": run_name or run_id, + "created_at": _utc_now(), + "episodes": int(episodes), + "config": config or {}, + "metrics": summary, + } + records.append(record) + save_leaderboard(path, records) + return record + + +def render_leaderboard_table(records: list[dict[str, Any]], top_k: int = 20, sort_by: str = "leaderboard_score") -> str: + ranked = sorted_leaderboard(records, sort_by=sort_by)[:top_k] + header = "| rank | run | score | success | graph_f1 | retrieval | structural | spawn | reward | tool_eff |\n" + sep = "|---|---|---:|---:|---:|---:|---:|---:|---:|---:|\n" + rows: list[str] = [] + for idx, rec in enumerate(ranked, start=1): + m = rec.get("metrics", {}) + rows.append( + "| {rank} | {run} | {score:.4f} | {succ:.3f} | {f1:.3f} | {retrieval:.3f} | {structural:.3f} | {spawn:.3f} | {reward:.3f} | {tool:.3f} |".format( + rank=idx, + run=rec.get("run_name", rec.get("run_id", "run")), + score=float(m.get("leaderboard_score", 0.0)), + 
succ=float(m.get("task_success_rate", 0.0)), + f1=float(m.get("avg_graph_f1", 0.0)), + retrieval=float(m.get("retrieval_signal", 0.0)), + structural=float(m.get("structural_signal", 0.0)), + spawn=float(m.get("spawn_signal", 0.0)), + reward=float(m.get("avg_reward", 0.0)), + tool=float(m.get("tool_efficiency", 0.0)), + ) + ) + return header + sep + "\n".join(rows) diff --git a/src/osint_env/eval/metrics.py b/src/osint_env/eval/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..e32bf182907847d0b69be77d46ecdfc9df6d0722 --- /dev/null +++ b/src/osint_env/eval/metrics.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +import math +from dataclasses import dataclass, field + + +@dataclass(slots=True) +class EvalMetrics: + episodes: int = 0 + success: int = 0 + total_steps: int = 0 + total_tool_calls: int = 0 + total_redundant_tool_calls: int = 0 + total_reward: float = 0.0 + deanonymization_total: int = 0 + deanonymization_success: int = 0 + graph_f1_scores: list[float] = field(default_factory=list) + total_knowledge_carrier: float = 0.0 + total_knowledge_indexing: float = 0.0 + total_connectivity: float = 0.0 + total_format_reward: float = 0.0 + total_relation_informativeness: float = 0.0 + total_entity_informativeness: float = 0.0 + total_diversity: float = 0.0 + total_soft_shaping: float = 0.0 + total_connectivity_gain: float = 0.0 + total_compactness: float = 0.0 + total_spawn_count: int = 0 + total_spawn_finished_subtasks: int = 0 + total_spawn_critical_steps: int = 0 + + @staticmethod + def _sigmoid_temperature(value: float, temperature: float = 2.0) -> float: + scaled = float(value) / max(1e-6, float(temperature)) + if scaled >= 0: + z = math.exp(-scaled) + return 1.0 / (1.0 + z) + z = math.exp(scaled) + return z / (1.0 + z) + + def add(self, info: dict, task_type: str, graph_f1: float) -> None: + self.episodes += 1 + ok = info.get("agent_answer") == info.get("task_answer") + self.success += int(ok) + self.total_steps += 
int(info.get("step_count", 0)) + self.total_tool_calls += int(info.get("tool_calls", 0)) + self.total_redundant_tool_calls += int(info.get("redundant_tool_calls", 0)) + self.total_reward += float(info.get("total_reward", 0.0)) + self.graph_f1_scores.append(graph_f1) + components = info.get("reward_components", {}) + self.total_knowledge_carrier += float(components.get("knowledge_carrier", 0.0)) + self.total_knowledge_indexing += float(components.get("knowledge_indexing", 0.0)) + self.total_connectivity += float(components.get("connectivity", 0.0)) + self.total_format_reward += float(components.get("format_reward", 0.0)) + self.total_relation_informativeness += float(components.get("relation_informativeness", 0.0)) + self.total_entity_informativeness += float(components.get("entity_informativeness", 0.0)) + self.total_diversity += float(components.get("diversity", 0.0)) + self.total_soft_shaping += float(components.get("soft_shaping", 0.0)) + self.total_connectivity_gain += float(components.get("connectivity_gain", 0.0)) + self.total_compactness += float(components.get("compactness", 0.0)) + self.total_spawn_count += int(info.get("spawn_count", 0)) + self.total_spawn_finished_subtasks += int(info.get("spawn_finished_subtasks", 0)) + self.total_spawn_critical_steps += int(info.get("spawn_critical_steps", 0)) + if task_type == "identity_resolution": + self.deanonymization_total += 1 + self.deanonymization_success += int(ok) + + def summary(self) -> dict: + episodes = max(1, self.episodes) + task_success_rate = self.success / episodes + tool_efficiency = 1.0 - (self.total_redundant_tool_calls / max(1, self.total_tool_calls)) + avg_graph_f1 = sum(self.graph_f1_scores) / max(1, len(self.graph_f1_scores)) + deanonymization_accuracy = self.deanonymization_success / max(1, self.deanonymization_total) + avg_reward_raw = self.total_reward / episodes + avg_reward = self._sigmoid_temperature(avg_reward_raw, temperature=2.0) + avg_knowledge_carrier = self.total_knowledge_carrier 
/ episodes + avg_knowledge_indexing = self.total_knowledge_indexing / episodes + avg_connectivity = self.total_connectivity / episodes + avg_relation_informativeness = self.total_relation_informativeness / episodes + avg_entity_informativeness = self.total_entity_informativeness / episodes + avg_diversity = self.total_diversity / episodes + avg_soft_shaping = self.total_soft_shaping / episodes + avg_connectivity_gain = self.total_connectivity_gain / episodes + avg_compactness = self.total_compactness / episodes + avg_spawn_count = self.total_spawn_count / episodes + spawn_completion = self.total_spawn_finished_subtasks / max(1, self.total_spawn_count) + avg_spawn_critical_steps = self.total_spawn_critical_steps / episodes + spawn_latency_signal = 1.0 / max(1.0, avg_spawn_critical_steps) + spawn_signal = max(0.0, min(1.0, 0.6 * spawn_completion + 0.4 * spawn_latency_signal)) + + reward_norm = avg_reward + retrieval_signal = max(0.0, min(1.0, 0.5 + 0.35 * avg_knowledge_carrier + 0.35 * avg_knowledge_indexing)) + structural_signal = max( + 0.0, + min( + 1.0, + 0.5 + + 0.25 * avg_connectivity + + 0.20 * avg_relation_informativeness + + 0.20 * avg_entity_informativeness + + 0.15 * avg_diversity + + 0.10 * avg_connectivity_gain, + ), + ) + leaderboard_score = ( + 0.28 * task_success_rate + + 0.20 * avg_graph_f1 + + 0.12 * tool_efficiency + + 0.12 * deanonymization_accuracy + + 0.14 * retrieval_signal + + 0.09 * structural_signal + + 0.05 * reward_norm + + 0.04 * spawn_signal + ) + return { + "task_success_rate": task_success_rate, + "tool_efficiency": tool_efficiency, + "avg_graph_f1": avg_graph_f1, + "avg_steps_to_solution": self.total_steps / episodes, + "deanonymization_accuracy": deanonymization_accuracy, + "avg_reward": avg_reward, + "avg_knowledge_carrier_reward": avg_knowledge_carrier, + "avg_knowledge_indexing_reward": avg_knowledge_indexing, + "avg_connectivity_reward": avg_connectivity, + "avg_format_reward": self.total_format_reward / episodes, + 
"avg_relation_informativeness_reward": avg_relation_informativeness, + "avg_entity_informativeness_reward": avg_entity_informativeness, + "avg_diversity_reward": avg_diversity, + "avg_soft_shaping_reward": avg_soft_shaping, + "avg_connectivity_gain_reward": avg_connectivity_gain, + "avg_compactness_reward": avg_compactness, + "avg_spawn_count": avg_spawn_count, + "spawn_completion_rate": spawn_completion, + "avg_spawn_critical_steps": avg_spawn_critical_steps, + "spawn_signal": spawn_signal, + "retrieval_signal": retrieval_signal, + "structural_signal": structural_signal, + "leaderboard_score": leaderboard_score, + } diff --git a/src/osint_env/eval/runner.py b/src/osint_env/eval/runner.py new file mode 100644 index 0000000000000000000000000000000000000000..c6c074c23cb741f629e3621d9fc9cdc9f319d6cb --- /dev/null +++ b/src/osint_env/eval/runner.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +from osint_env.agents.single_agent import SingleAgentRunner +from osint_env.agents.swarm_agent import SwarmAgentRunner +from osint_env.env.environment import OSINTEnvironment +from osint_env.env.reward import compute_graph_f1 +from osint_env.eval.metrics import EvalMetrics +from osint_env.llm.interface import LLMClient + + +def run_evaluation( + env: OSINTEnvironment, + episodes: int = 20, + return_details: bool = False, + llm: LLMClient | None = None, +) -> dict: + metrics = EvalMetrics() + if env.config.swarm.enabled: + runner = SwarmAgentRunner(env=env, llm=llm) + else: + runner = SingleAgentRunner(env=env, llm=llm) + episode_rows: list[dict] = [] + for _ in range(episodes): + info = runner.run_episode() + task_type = env.state.task.task_type if env.state else "unknown" + task_id = env.state.task.task_id if env.state else "unknown" + truth = env.state.task.supporting_edges if env.state else [] + pred = env.memory_graph.edges if env.state else [] + graph_f1 = compute_graph_f1(pred, truth) + metrics.add(info, task_type=task_type, graph_f1=graph_f1) + 
episode_rows.append( + { + "task_id": task_id, + "task_type": task_type, + "question": env.state.task.question if env.state else "", + "task_answer": str(info.get("task_answer", "")), + "agent_answer": str(info.get("agent_answer", "")) if info.get("agent_answer") is not None else "", + "graph_f1": graph_f1, + "reward": float(info.get("total_reward", 0.0)), + "steps": int(info.get("step_count", 0)), + "tool_calls": int(info.get("tool_calls", 0)), + "success": int(info.get("agent_answer") == info.get("task_answer")), + "reward_components": dict(info.get("reward_components", {})), + "spawn_count": int(info.get("spawn_count", 0)), + "spawn_critical_steps": int(info.get("spawn_critical_steps", 0)), + "pred_edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in pred + ], + "truth_edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in truth + ], + } + ) + summary = metrics.summary() + if return_details: + return {"summary": summary, "episodes": episode_rows} + return summary diff --git a/src/osint_env/llm/__init__.py b/src/osint_env/llm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2980377f05c73f88682c51811fb9871602f9a638 --- /dev/null +++ b/src/osint_env/llm/__init__.py @@ -0,0 +1,20 @@ +"""LLM interface package.""" + +from osint_env.llm.interface import ( + LLMClient, + LLMResponse, + OllamaLLMClient, + OpenAILLMClient, + RuleBasedMockLLM, + build_llm_client, +) + +__all__ = [ + "LLMClient", + "LLMResponse", + "RuleBasedMockLLM", + "OllamaLLMClient", + "OpenAILLMClient", + "build_llm_client", +] + diff --git a/src/osint_env/llm/interface.py b/src/osint_env/llm/interface.py new file mode 100644 index 0000000000000000000000000000000000000000..e223e1a3873d094b62789c4c45e7f0956c419d33 --- /dev/null +++ b/src/osint_env/llm/interface.py @@ -0,0 +1,167 @@ +from __future__ import annotations + 
+import json +import os +from dataclasses import dataclass +from typing import Any, Protocol + +import requests +from requests import RequestException + +from osint_env.domain.models import LLMConfig + + +@dataclass(slots=True) +class LLMResponse: + content: str + tool_calls: list[dict[str, Any]] + + +class LLMClient(Protocol): + def generate(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> LLMResponse: + ... + + +class RuleBasedMockLLM: + """Deterministic fallback for local testing without model dependencies.""" + + def generate(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> LLMResponse: + question = "" + for m in reversed(messages): + if m.get("role") == "system" and "question" in m.get("content", ""): + question = m["content"] + break + if "alias" in question: + return LLMResponse( + content="Need alias lookup.", + tool_calls=[{"tool_name": "search_posts", "args": {"query": "Update"}}, {"tool_name": "get_profile", "args": {"user_id": "user_0"}}], + ) + return LLMResponse(content="Need profile lookup.", tool_calls=[{"tool_name": "search_people", "args": {"org": "Apex"}}]) + + +class OllamaLLMClient: + def __init__(self, model: str, base_url: str = "http://127.0.0.1:11434", temperature: float = 0.1, timeout_seconds: int = 240): + self.model = model + self.base_url = base_url.rstrip("/") + self.temperature = float(temperature) + self.timeout_seconds = int(timeout_seconds) + + @staticmethod + def _extract_tool_calls(content: str) -> list[dict[str, Any]]: + text = str(content or "").strip() + if not text: + return [] + left = text.find("{") + right = text.rfind("}") + if left >= 0 and right > left: + snippet = text[left : right + 1] + try: + parsed = json.loads(snippet) + except json.JSONDecodeError: + parsed = None + if isinstance(parsed, dict) and isinstance(parsed.get("tool_calls"), list): + out: list[dict[str, Any]] = [] + for item in parsed["tool_calls"]: + if isinstance(item, dict) and "tool_name" in item and 
isinstance(item.get("args", {}), dict): + out.append({"tool_name": str(item["tool_name"]), "args": dict(item.get("args", {}))}) + return out + return [] + + def generate(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> LLMResponse: + payload = { + "model": self.model, + "messages": messages, + "stream": False, + "options": { + "temperature": self.temperature, + }, + } + if tools: + payload["tools"] = tools + try: + response = requests.post( + f"{self.base_url}/api/chat", + json=payload, + timeout=self.timeout_seconds, + ) + response.raise_for_status() + data = response.json() + content = str((data.get("message") or {}).get("content", "")) + tool_calls = self._extract_tool_calls(content) + return LLMResponse(content=content, tool_calls=tool_calls) + except (RequestException, ValueError): + # Keep episode execution resilient when local model calls are transiently slow/unavailable. + return LLMResponse(content="", tool_calls=[]) + + +class OpenAILLMClient: + def __init__( + self, + model: str, + api_key: str, + base_url: str = "https://api.openai.com/v1", + temperature: float = 0.1, + max_tokens: int = 256, + timeout_seconds: int = 240, + ): + from openai import OpenAI + + self.model = model + self.temperature = float(temperature) + self.max_tokens = int(max_tokens) + self.client = OpenAI(api_key=api_key, base_url=base_url, timeout=timeout_seconds) + + def generate(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> LLMResponse: + kwargs: dict[str, Any] = { + "model": self.model, + "messages": messages, + "temperature": self.temperature, + "max_tokens": self.max_tokens, + } + if tools: + kwargs["tools"] = tools + try: + completion = self.client.chat.completions.create(**kwargs) + message = completion.choices[0].message + content = message.content if isinstance(message.content, str) else "" + + tool_calls: list[dict[str, Any]] = [] + for tc in message.tool_calls or []: + try: + args = json.loads(tc.function.arguments or "{}") + 
except json.JSONDecodeError: + args = {} + tool_calls.append({"tool_name": tc.function.name, "args": args if isinstance(args, dict) else {}}) + return LLMResponse(content=content, tool_calls=tool_calls) + except Exception: + return LLMResponse(content="", tool_calls=[]) + + +def build_llm_client(config: LLMConfig | None = None) -> LLMClient: + cfg = config or LLMConfig() + provider = str(cfg.provider).strip().lower() + if provider in {"", "mock", "rule", "rule_based"}: + return RuleBasedMockLLM() + if provider == "ollama": + return OllamaLLMClient( + model=cfg.model, + base_url=cfg.ollama_base_url, + temperature=cfg.temperature, + timeout_seconds=cfg.timeout_seconds, + ) + if provider == "openai": + api_key = cfg.openai_api_key or os.getenv(cfg.openai_api_key_env, "") + if not api_key: + raise ValueError( + "OpenAI provider selected but API key is missing. " + f"Set {cfg.openai_api_key_env} or populate openai_api_key in config." + ) + return OpenAILLMClient( + model=cfg.model, + api_key=api_key, + base_url=cfg.openai_base_url, + temperature=cfg.temperature, + max_tokens=cfg.max_tokens, + timeout_seconds=cfg.timeout_seconds, + ) + raise ValueError(f"Unsupported llm provider: {cfg.provider}") diff --git a/src/osint_env/memory/__init__.py b/src/osint_env/memory/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..519788dfb2a8240a0d4f54981c2c65c6e71d0df8 --- /dev/null +++ b/src/osint_env/memory/__init__.py @@ -0,0 +1,2 @@ +"""Structured memory package.""" + diff --git a/src/osint_env/memory/store.py b/src/osint_env/memory/store.py new file mode 100644 index 0000000000000000000000000000000000000000..780f46087c8b7ea165b777a0fc61a4c8b7f613cb --- /dev/null +++ b/src/osint_env/memory/store.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +import math +import re +from collections import Counter +from dataclasses import dataclass, field +from typing import Any + +from osint_env.domain.models import Edge + + +def _tokenize(text: str) -> 
list[str]: + return [t for t in re.findall(r"[a-zA-Z0-9_]+", text.lower()) if t] + + +@dataclass(slots=True) +class MemoryGraph: + nodes: dict[str, dict[str, Any]] = field(default_factory=dict) + edges: list[Edge] = field(default_factory=list) + + def add_edge(self, edge: Edge) -> bool: + key = (edge.src, edge.rel, edge.dst) + if any((e.src, e.rel, e.dst) == key for e in self.edges): + return False + self.edges.append(edge) + return True + + def to_snapshot(self) -> dict[str, Any]: + return { + "nodes_count": len(self.nodes), + "edges_count": len(self.edges), + "edges": [{"src": e.src, "rel": e.rel, "dst": e.dst, "confidence": e.confidence} for e in self.edges], + } + + +@dataclass(slots=True) +class SemanticMemory: + docs: list[dict[str, Any]] = field(default_factory=list) + + def add(self, text: str, metadata: dict[str, Any]) -> None: + self.docs.append({"text": text, "metadata": metadata, "tokens": Counter(_tokenize(text))}) + + def search(self, query: str, k: int = 5) -> list[dict[str, Any]]: + q = Counter(_tokenize(query)) + scored: list[tuple[float, dict[str, Any]]] = [] + for doc in self.docs: + score = self._cosine(q, doc["tokens"]) + if score > 0: + scored.append((score, doc)) + scored.sort(key=lambda x: x[0], reverse=True) + return [{"score": s, "text": d["text"], "metadata": d["metadata"]} for s, d in scored[:k]] + + @staticmethod + def _cosine(a: Counter, b: Counter) -> float: + common = set(a) & set(b) + num = sum(a[t] * b[t] for t in common) + den = math.sqrt(sum(v * v for v in a.values())) * math.sqrt(sum(v * v for v in b.values())) + return (num / den) if den else 0.0 diff --git a/src/osint_env/platforms/__init__.py b/src/osint_env/platforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc16c8fc0fb7ab3780746c358de603a3ff5eb1d6 --- /dev/null +++ b/src/osint_env/platforms/__init__.py @@ -0,0 +1,2 @@ +"""Platform and tool adapters.""" + diff --git a/src/osint_env/platforms/tools.py b/src/osint_env/platforms/tools.py 
from __future__ import annotations

from collections import defaultdict
from typing import Any

from osint_env.data.generator import PlatformViews


class ToolRegistry:
    """Read-only tool surface over the synthetic platform views.

    All indexes are built once in ``__init__``; every public tool method only
    reads them, so one registry can be shared across agents within an episode.
    """

    # Explicit dispatch allowlist for call(). tool_name arrives from model
    # output (untrusted input); routing through bare getattr would let it
    # reach private helpers or plain attributes.
    _PUBLIC_TOOLS = frozenset(
        {
            "search_posts",
            "get_post",
            "get_user_posts",
            "get_mentions",
            "search_threads",
            "get_thread",
            "get_user_activity",
            "get_profile",
            "search_people",
            "get_connections",
        }
    )

    def __init__(self, views: PlatformViews):
        self.views = views
        # Defensive copy; alias_lookup maps alias ids -> canonical user ids.
        self.alias_lookup = dict(getattr(views, "alias_lookup", {}))
        self._index()

    @staticmethod
    def _normalize_lookup_token(value: str) -> str:
        """Strip one known id prefix and underscores so ids match display names."""
        token = str(value or "").strip().lower()
        for prefix in ("org_", "loc_", "event_", "post_", "thr_", "thread_", "alias_", "user_"):
            if token.startswith(prefix):
                token = token[len(prefix) :]
                break
        return token.replace("_", " ")

    def _resolve_user_ids(self, user_id: str) -> list[str]:
        """Expand a user id to itself, its canonical owner, and all of its aliases."""
        user_id = str(user_id or "").strip()
        if not user_id:
            return []
        resolved = [user_id]
        canonical = self.alias_lookup.get(user_id)
        if canonical and canonical not in resolved:
            resolved.append(canonical)
        for alias_id, owner in self.alias_lookup.items():
            if owner == user_id and alias_id not in resolved:
                resolved.append(alias_id)
        return resolved

    def _index(self) -> None:
        """Build lookup tables for posts, mentions, threads, activity and profiles."""
        self.posts_by_user: dict[str, list[dict[str, Any]]] = defaultdict(list)
        self.mentions_by_user: dict[str, list[dict[str, Any]]] = defaultdict(list)
        self.posts_by_id = {post["post_id"]: post for post in self.views.microblog_posts}
        for post in self.views.microblog_posts:
            self.posts_by_user[post["user_id"]].append(post)
            # Posts written under an alias are also indexed under the owner.
            canonical_user = post.get("canonical_user")
            if canonical_user:
                self.posts_by_user[canonical_user].append(post)
            for m in post.get("mentions", []):
                self.mentions_by_user[m].append(post)

        self.threads_by_id = {t["thread_id"]: t for t in self.views.forum_threads}
        self.activity_by_user: dict[str, list[dict[str, Any]]] = defaultdict(list)
        for thread in self.views.forum_threads:
            self.activity_by_user[thread["author_id"]].append({"kind": "thread", "thread_id": thread["thread_id"]})
            for c in thread.get("comments", []):
                self.activity_by_user[c["user_id"]].append({"kind": "comment", "thread_id": thread["thread_id"]})

        self.profiles_by_user = {p["user_id"]: p for p in self.views.profiles}

    def call(self, tool_name: str, args: dict[str, Any]) -> dict[str, Any]:
        """Invoke a public tool by name with keyword arguments.

        Fix: previously dispatched any attribute via ``getattr``, so an
        untrusted tool name could invoke private helpers (or hit a plain
        attribute and fail with TypeError). Now restricted to the allowlist.

        Raises:
            ValueError: when ``tool_name`` is not a registered public tool.
        """
        if tool_name not in self._PUBLIC_TOOLS:
            raise ValueError(f"Unknown tool: {tool_name}")
        return getattr(self, tool_name)(**args)

    def search_posts(self, query: str, time_range: tuple[int, int] | None = None) -> dict[str, Any]:
        """Case-insensitive substring search over posts within an optional time range."""
        start, end = time_range or (0, 10**9)
        needle = str(query or "").lower()
        results = [
            p
            for p in self.views.microblog_posts
            if start <= p["timestamp"] <= end
            and (
                needle in p["text"].lower()
                or needle in str(p.get("post_id", "")).lower()
                or needle in str(p.get("user_id", "")).lower()
                or needle in str(p.get("canonical_user", "")).lower()
                or any(needle in str(ref).lower() for ref in p.get("references", []))
                or any(needle in str(ref).lower() for ref in p.get("reference_names", []))
            )
        ]
        # Payload is capped at 20 items; count reports the full match total.
        return {"results": results[:20], "count": len(results)}

    def get_post(self, post_id: str) -> dict[str, Any]:
        """Fetch a single post by id."""
        post = self.posts_by_id.get(post_id)
        return {"result": post, "found": post is not None}

    def get_user_posts(self, user_id: str) -> dict[str, Any]:
        """All posts by a user, including alias accounts, deduplicated by post id."""
        results: list[dict[str, Any]] = []
        seen_post_ids: set[str] = set()
        for resolved_id in self._resolve_user_ids(user_id):
            for post in self.posts_by_user.get(resolved_id, []):
                post_id = str(post.get("post_id", ""))
                if post_id in seen_post_ids:
                    continue
                seen_post_ids.add(post_id)
                results.append(post)
        return {"results": results, "count": len(results)}

    def get_mentions(self, user_id: str) -> dict[str, Any]:
        """All posts mentioning a user or any of their aliases, deduplicated."""
        results: list[dict[str, Any]] = []
        seen_post_ids: set[str] = set()
        for resolved_id in self._resolve_user_ids(user_id):
            for post in self.mentions_by_user.get(resolved_id, []):
                post_id = str(post.get("post_id", ""))
                if post_id in seen_post_ids:
                    continue
                seen_post_ids.add(post_id)
                results.append(post)
        return {"results": results, "count": len(results)}

    def search_threads(self, topic: str) -> dict[str, Any]:
        """Threads matching a topic exactly, or fuzzily by id/title substring."""
        needle = str(topic or "").strip().lower()
        results = [
            t
            for t in self.views.forum_threads
            if t["topic"] == topic
            or needle in str(t.get("thread_id", "")).lower()
            or needle in str(t.get("title", "")).lower()
        ]
        return {"results": results[:20], "count": len(results)}

    def get_thread(self, thread_id: str) -> dict[str, Any]:
        """Fetch a single forum thread by id."""
        thread = self.threads_by_id.get(thread_id)
        return {"result": thread, "found": thread is not None}

    def get_user_activity(self, user_id: str) -> dict[str, Any]:
        """Forum activity (threads + comments) across a user's resolved ids."""
        acts: list[dict[str, Any]] = []
        seen = set()
        for resolved_id in self._resolve_user_ids(user_id):
            for activity in self.activity_by_user.get(resolved_id, []):
                # Deduplicate by (kind, thread) so alias overlap is not double-counted.
                key = (activity.get("kind"), activity.get("thread_id"))
                if key in seen:
                    continue
                seen.add(key)
                acts.append(activity)
        return {"results": acts, "count": len(acts)}

    def get_profile(self, user_id: str) -> dict[str, Any]:
        """Profile for a user id, falling back through canonical id and aliases."""
        resolved_ids = self._resolve_user_ids(user_id)
        profile = next((self.profiles_by_user.get(candidate) for candidate in resolved_ids if self.profiles_by_user.get(candidate)), None)
        return {"result": profile, "found": profile is not None}

    def search_people(self, name: str | None = None, org: str | None = None) -> dict[str, Any]:
        """Filter profiles by (sub)name and/or organization; both filters are ANDed."""
        results = self.views.profiles
        if name:
            name_query = str(name).lower()
            results = [
                p
                for p in results
                if name_query in p["name"].lower()
                or name_query in p["user_id"].lower()
                or any(name_query in alias.lower() for alias in p.get("alias_ids", []))
            ]
        if org:
            org_query = str(org).lower()
            # Also match org ids like "org_apex" against the display name "Apex".
            normalized_org = self._normalize_lookup_token(org_query)
            results = [
                p
                for p in results
                if org_query in p["org"].lower()
                or org_query in str(p.get("org_id", "")).lower()
                or (normalized_org and normalized_org in p["org"].lower())
            ]
        return {"results": results[:20], "count": len(results)}

    def get_connections(self, user_id: str) -> dict[str, Any]:
        """Connection list from the user's profile.

        Fix: a missing profile or a profile without a "connections" field now
        yields an empty result instead of a KeyError (and the profile dict is
        indexed once instead of twice).
        """
        resolved_ids = self._resolve_user_ids(user_id)
        profile = next((self.profiles_by_user.get(candidate) for candidate in resolved_ids if self.profiles_by_user.get(candidate)), None)
        connections = profile.get("connections", []) if profile else []
        return {"results": connections, "count": len(connections)}
0000000000000000000000000000000000000000..6be61f8ffa195d0391602b031d4dca1b98e4065b --- /dev/null +++ b/src/osint_env/training/config.py @@ -0,0 +1,428 @@ +from __future__ import annotations + +import json +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + + +@dataclass(slots=True) +class KimiGRPOPhaseConfig: + """Configuration for one GRPO phase in the alternating self-play loop.""" + + model_name_or_path: str = "Qwen/Qwen2.5-0.5B-Instruct" + learning_rate: float = 1e-6 + max_steps: int = 64 + per_device_train_batch_size: int = 2 + gradient_accumulation_steps: int = 4 + num_generations: int = 4 + max_completion_length: int = 256 + temperature: float = 1.0 + top_p: float = 1.0 + beta: float = 0.01 + epsilon: float = 0.2 + num_iterations: int = 1 + loss_type: str = "dapo" + scale_rewards: str = "none" + logging_steps: int = 10 + save_steps: int = 50 + output_subdir: str = "phase" + use_vllm: bool = False + vllm_mode: str = "colocate" + + +@dataclass(slots=True) +class GeneratorRewardWeights: + """Weighted components for adversarial task-generator reward.""" + + validity: float = 0.35 + hardness: float = 0.45 + diversity: float = 0.10 + consistency: float = 0.10 + + +@dataclass(slots=True) +class LoraTuningConfig: + """LoRA hyperparameters for parameter-efficient GRPO updates.""" + + r: int = 16 + alpha: int = 32 + dropout: float = 0.05 + target_modules: list[str] = field(default_factory=lambda: ["q_proj", "k_proj", "v_proj", "o_proj"]) + bias: str = "none" + task_type: str = "CAUSAL_LM" + + +@dataclass(slots=True) +class SwarmV2SwarmConfig: + """Config for one orchestrated swarm role inside the swarm_v2 pipeline.""" + + shared_context: bool = True + max_agents: int = 4 + max_breadth: int = 3 + max_depth: int = 2 + planner_rounds: int = 2 + tools_per_agent: int = 2 + + +@dataclass(slots=True) +class SwarmV2ValidationConfig: + """Validation and replay limits for swarm_v2 task generation.""" + + max_support_edges: int = 8 + 
max_path_hops: int = 4 + max_context_nodes: int = 14 + max_context_edges: int = 8 + duplicate_similarity_threshold: float = 0.8 + + +@dataclass(slots=True) +class SwarmV2SharedContextConfig: + """Shared context budgets used by both generator and answerer swarms.""" + + shared_by_default: bool = True + max_nodes: int = 14 + max_edges: int = 8 + target_pressure: float = 0.85 + + +@dataclass(slots=True) +class SwarmV2Config: + """Config block for the config-gated Swarm Self-Play v2 pipeline.""" + + generator_swarm: SwarmV2SwarmConfig = field(default_factory=SwarmV2SwarmConfig) + answerer_swarm: SwarmV2SwarmConfig = field( + default_factory=lambda: SwarmV2SwarmConfig( + shared_context=True, + max_agents=3, + max_breadth=2, + max_depth=2, + planner_rounds=2, + tools_per_agent=2, + ) + ) + validation: SwarmV2ValidationConfig = field(default_factory=SwarmV2ValidationConfig) + shared_context: SwarmV2SharedContextConfig = field(default_factory=SwarmV2SharedContextConfig) + + +@dataclass(slots=True) +class SelfPlayTrainingConfig: + """Top-level adversarial self-play training configuration.""" + + rounds: int = 3 + output_dir: str = "artifacts/self_play" + dry_run: bool = True + wandb_enabled: bool = False + wandb_project: str = "osint-self-play" + wandb_entity: str = "" + wandb_run_name_prefix: str = "self-play" + canonical_graph_mode: str = "generate" + pipeline_mode: str = "legacy" + model_topology: str = "dual" + phase_schedule: str = "generator_answerer" + tuning_mode: str = "full" + shared_model_name_or_path: str = "" + seed_tasks_per_round: int = 16 + generated_tasks_per_round: int = 24 + generator_prompts_per_round: int = 24 + max_graph_context_nodes: int = 100 + max_graph_context_edges: int = 100 + max_support_edges: int = 8 + answerer_judge_max_new_tokens: int = 48 + generator_reward_weights: GeneratorRewardWeights = field(default_factory=GeneratorRewardWeights) + lora: LoraTuningConfig = field(default_factory=LoraTuningConfig) + swarm_v2: SwarmV2Config = 
field(default_factory=SwarmV2Config) + generator_phase: KimiGRPOPhaseConfig = field( + default_factory=lambda: KimiGRPOPhaseConfig(output_subdir="generator") + ) + answerer_phase: KimiGRPOPhaseConfig = field( + default_factory=lambda: KimiGRPOPhaseConfig(output_subdir="answerer") + ) + + +def _as_dict(value: Any) -> dict[str, Any]: + return value if isinstance(value, dict) else {} + + +def _parse_int(value: Any, default: int, floor: int | None = None) -> int: + try: + out = int(value) + except (TypeError, ValueError): + out = default + if floor is not None: + out = max(floor, out) + return out + + +def _parse_float(value: Any, default: float) -> float: + try: + return float(value) + except (TypeError, ValueError): + return default + + +def _parse_bool(value: Any, default: bool) -> bool: + if isinstance(value, bool): + return value + if isinstance(value, str): + token = value.strip().lower() + if token in {"1", "true", "yes", "y", "on"}: + return True + if token in {"0", "false", "no", "n", "off"}: + return False + return default + + +def _parse_str_choice(value: Any, default: str, allowed: set[str]) -> str: + token = str(value).strip().lower() + if token in allowed: + return token + return default + + +def _parse_str_list(value: Any, fallback: list[str]) -> list[str]: + if isinstance(value, list): + out = [str(item).strip() for item in value if str(item).strip()] + return out or list(fallback) + if isinstance(value, str): + out = [item.strip() for item in value.split(",") if item.strip()] + return out or list(fallback) + return list(fallback) + + +def _parse_phase(data: dict[str, Any], fallback: KimiGRPOPhaseConfig) -> KimiGRPOPhaseConfig: + return KimiGRPOPhaseConfig( + model_name_or_path=str(data.get("model_name_or_path", fallback.model_name_or_path)).strip() + or fallback.model_name_or_path, + learning_rate=_parse_float(data.get("learning_rate"), fallback.learning_rate), + max_steps=_parse_int(data.get("max_steps"), fallback.max_steps, floor=1), + 
per_device_train_batch_size=_parse_int( + data.get("per_device_train_batch_size"), + fallback.per_device_train_batch_size, + floor=1, + ), + gradient_accumulation_steps=_parse_int( + data.get("gradient_accumulation_steps"), + fallback.gradient_accumulation_steps, + floor=1, + ), + num_generations=_parse_int(data.get("num_generations"), fallback.num_generations, floor=1), + max_completion_length=_parse_int( + data.get("max_completion_length"), + fallback.max_completion_length, + floor=1, + ), + temperature=_parse_float(data.get("temperature"), fallback.temperature), + top_p=_parse_float(data.get("top_p"), fallback.top_p), + beta=_parse_float(data.get("beta"), fallback.beta), + epsilon=_parse_float(data.get("epsilon"), fallback.epsilon), + num_iterations=_parse_int(data.get("num_iterations"), fallback.num_iterations, floor=1), + loss_type=str(data.get("loss_type", fallback.loss_type)).strip() or fallback.loss_type, + scale_rewards=str(data.get("scale_rewards", fallback.scale_rewards)).strip() or fallback.scale_rewards, + logging_steps=_parse_int(data.get("logging_steps"), fallback.logging_steps, floor=1), + save_steps=_parse_int(data.get("save_steps"), fallback.save_steps, floor=1), + output_subdir=str(data.get("output_subdir", fallback.output_subdir)).strip() or fallback.output_subdir, + use_vllm=_parse_bool(data.get("use_vllm"), fallback.use_vllm), + vllm_mode=str(data.get("vllm_mode", fallback.vllm_mode)).strip() or fallback.vllm_mode, + ) + + +def _parse_generator_weights(data: dict[str, Any]) -> GeneratorRewardWeights: + return GeneratorRewardWeights( + validity=_parse_float(data.get("validity"), 0.35), + hardness=_parse_float(data.get("hardness"), 0.45), + diversity=_parse_float(data.get("diversity"), 0.10), + consistency=_parse_float(data.get("consistency"), 0.10), + ) + + +def _parse_lora_config(data: dict[str, Any], fallback: LoraTuningConfig) -> LoraTuningConfig: + return LoraTuningConfig( + r=_parse_int(data.get("r"), fallback.r, floor=1), + 
alpha=_parse_int(data.get("alpha"), fallback.alpha, floor=1), + dropout=_parse_float(data.get("dropout"), fallback.dropout), + target_modules=_parse_str_list(data.get("target_modules"), fallback.target_modules), + bias=str(data.get("bias", fallback.bias)).strip() or fallback.bias, + task_type=str(data.get("task_type", fallback.task_type)).strip() or fallback.task_type, + ) + + +def _parse_swarm_v2_swarm_config( + data: dict[str, Any], + fallback: SwarmV2SwarmConfig, +) -> SwarmV2SwarmConfig: + return SwarmV2SwarmConfig( + shared_context=_parse_bool(data.get("shared_context"), fallback.shared_context), + max_agents=_parse_int(data.get("max_agents"), fallback.max_agents, floor=1), + max_breadth=_parse_int(data.get("max_breadth"), fallback.max_breadth, floor=1), + max_depth=_parse_int(data.get("max_depth"), fallback.max_depth, floor=1), + planner_rounds=_parse_int(data.get("planner_rounds"), fallback.planner_rounds, floor=1), + tools_per_agent=_parse_int(data.get("tools_per_agent"), fallback.tools_per_agent, floor=1), + ) + + +def _parse_swarm_v2_validation_config( + data: dict[str, Any], + fallback: SwarmV2ValidationConfig, + legacy_max_support_edges: int, +) -> SwarmV2ValidationConfig: + default_max_support_edges = ( + _parse_int(data.get("max_support_edges"), legacy_max_support_edges, floor=1) + if "max_support_edges" not in data + else _parse_int(data.get("max_support_edges"), fallback.max_support_edges, floor=1) + ) + return SwarmV2ValidationConfig( + max_support_edges=default_max_support_edges, + max_path_hops=_parse_int(data.get("max_path_hops"), fallback.max_path_hops, floor=1), + max_context_nodes=_parse_int(data.get("max_context_nodes"), fallback.max_context_nodes, floor=1), + max_context_edges=_parse_int(data.get("max_context_edges"), fallback.max_context_edges, floor=1), + duplicate_similarity_threshold=max( + 0.0, + min( + 1.0, + _parse_float( + data.get("duplicate_similarity_threshold"), + fallback.duplicate_similarity_threshold, + ), + ), + ), + ) + + 
def _parse_swarm_v2_shared_context_config(
    data: dict[str, Any],
    fallback: SwarmV2SharedContextConfig,
) -> SwarmV2SharedContextConfig:
    """Parse shared-context budgets; target_pressure is clamped to [0, 1]."""
    return SwarmV2SharedContextConfig(
        shared_by_default=_parse_bool(data.get("shared_by_default"), fallback.shared_by_default),
        max_nodes=_parse_int(data.get("max_nodes"), fallback.max_nodes, floor=1),
        max_edges=_parse_int(data.get("max_edges"), fallback.max_edges, floor=1),
        target_pressure=max(0.0, min(1.0, _parse_float(data.get("target_pressure"), fallback.target_pressure))),
    )


def _parse_swarm_v2_config(
    data: dict[str, Any],
    fallback: SwarmV2Config,
    legacy_max_support_edges: int,
) -> SwarmV2Config:
    """Assemble the full swarm_v2 block from its four sub-sections.

    `legacy_max_support_edges` is threaded through so an old-style top-level
    `max_support_edges` still applies when the swarm_v2 validation block omits it.
    """
    return SwarmV2Config(
        generator_swarm=_parse_swarm_v2_swarm_config(
            _as_dict(data.get("generator_swarm")),
            fallback.generator_swarm,
        ),
        answerer_swarm=_parse_swarm_v2_swarm_config(
            _as_dict(data.get("answerer_swarm")),
            fallback.answerer_swarm,
        ),
        validation=_parse_swarm_v2_validation_config(
            _as_dict(data.get("validation")),
            fallback.validation,
            legacy_max_support_edges=legacy_max_support_edges,
        ),
        shared_context=_parse_swarm_v2_shared_context_config(
            _as_dict(data.get("shared_context")),
            fallback.shared_context,
        ),
    )


def load_self_play_config(path: str | Path | None) -> SelfPlayTrainingConfig:
    """Load a SelfPlayTrainingConfig from a JSON file.

    A falsy path or a missing file silently yields all defaults; a file whose
    top level is not a JSON object raises ValueError. JSON decode errors
    propagate to the caller.
    """
    if not path:
        return SelfPlayTrainingConfig()

    file_path = Path(path)
    if not file_path.exists():
        return SelfPlayTrainingConfig()

    payload = json.loads(file_path.read_text(encoding="utf-8"))
    if not isinstance(payload, dict):
        raise ValueError("Self-play config file must contain a JSON object.")

    defaults = SelfPlayTrainingConfig()
    generator_phase = _parse_phase(_as_dict(payload.get("generator_phase")), defaults.generator_phase)
    answerer_phase = _parse_phase(_as_dict(payload.get("answerer_phase")), defaults.answerer_phase)
    lora_cfg = _parse_lora_config(_as_dict(payload.get("lora")), defaults.lora)
    # Old-style top-level max_support_edges; also reused by the swarm_v2 block.
    legacy_max_support_edges = _parse_int(payload.get("max_support_edges"), defaults.max_support_edges, floor=1)
    swarm_v2_cfg = _parse_swarm_v2_config(
        _as_dict(payload.get("swarm_v2")),
        defaults.swarm_v2,
        legacy_max_support_edges=legacy_max_support_edges,
    )

    return SelfPlayTrainingConfig(
        rounds=_parse_int(payload.get("rounds"), defaults.rounds, floor=1),
        output_dir=str(payload.get("output_dir", defaults.output_dir)).strip() or defaults.output_dir,
        dry_run=_parse_bool(payload.get("dry_run"), defaults.dry_run),
        wandb_enabled=_parse_bool(payload.get("wandb_enabled"), defaults.wandb_enabled),
        wandb_project=str(payload.get("wandb_project", defaults.wandb_project)).strip() or defaults.wandb_project,
        # wandb_entity may legitimately be blank, so no fallback here.
        wandb_entity=str(payload.get("wandb_entity", defaults.wandb_entity)).strip(),
        wandb_run_name_prefix=str(payload.get("wandb_run_name_prefix", defaults.wandb_run_name_prefix)).strip()
        or defaults.wandb_run_name_prefix,
        canonical_graph_mode=_parse_str_choice(
            payload.get("canonical_graph_mode"),
            defaults.canonical_graph_mode,
            {"generate", "fixed"},
        ),
        pipeline_mode=_parse_str_choice(
            payload.get("pipeline_mode"),
            defaults.pipeline_mode,
            {"legacy", "swarm_v2"},
        ),
        model_topology=_parse_str_choice(
            payload.get("model_topology"),
            defaults.model_topology,
            {"dual", "shared"},
        ),
        phase_schedule=_parse_str_choice(
            payload.get("phase_schedule"),
            defaults.phase_schedule,
            {"generator_answerer", "answerer_generator_answerer"},
        ),
        tuning_mode=_parse_str_choice(
            payload.get("tuning_mode"),
            defaults.tuning_mode,
            {"full", "lora"},
        ),
        # Blank means "no shared model"; used only when model_topology == "shared" — TODO confirm.
        shared_model_name_or_path=str(
            payload.get("shared_model_name_or_path", defaults.shared_model_name_or_path)
        ).strip(),
        seed_tasks_per_round=_parse_int(
            payload.get("seed_tasks_per_round"),
            defaults.seed_tasks_per_round,
            floor=1,
        ),
        generated_tasks_per_round=_parse_int(
            payload.get("generated_tasks_per_round"),
            defaults.generated_tasks_per_round,
            floor=1,
        ),
        generator_prompts_per_round=_parse_int(
            payload.get("generator_prompts_per_round"),
            defaults.generator_prompts_per_round,
            floor=1,
        ),
        max_graph_context_nodes=_parse_int(
            payload.get("max_graph_context_nodes"),
            defaults.max_graph_context_nodes,
            floor=1,
        ),
        max_graph_context_edges=_parse_int(
            payload.get("max_graph_context_edges"),
            defaults.max_graph_context_edges,
            floor=1,
        ),
        max_support_edges=legacy_max_support_edges,
        answerer_judge_max_new_tokens=_parse_int(
            payload.get("answerer_judge_max_new_tokens"),
            defaults.answerer_judge_max_new_tokens,
            floor=1,
        ),
        generator_reward_weights=_parse_generator_weights(
            _as_dict(payload.get("generator_reward_weights"))
        ),
        lora=lora_cfg,
        swarm_v2=swarm_v2_cfg,
        generator_phase=generator_phase,
        answerer_phase=answerer_phase,
    )

# diff --git a/src/osint_env/training/rewards.py b/src/osint_env/training/rewards.py (new file)
from __future__ import annotations

import json
import re
from collections import Counter
from dataclasses import dataclass, field
from functools import lru_cache
from typing import Any

from osint_env.data.generator import (
    emit_swarm_v2_question,
    enumerate_swarm_v2_neighbors,
    select_swarm_v2_answer,
    trace_swarm_v2_path,
)
from osint_env.domain.models import CanonicalGraph, Edge, TaskInstance
from osint_env.env.reward import build_reward_model, compute_answer_reward
from osint_env.env.spawn_reward_hooks import parl_reward_breakdown
from osint_env.training.config import (
    GeneratorRewardWeights,
    SwarmV2SharedContextConfig,
    SwarmV2ValidationConfig,
)


def decode_completion_text(completion: Any) -> str:
    """Flatten a completion (string, chat-message list, or dict) to plain text."""
    if isinstance(completion, str):
        return completion
    if isinstance(completion, list):
        parts: list[str] = []
        for item in completion:
            if isinstance(item,
# (continuation of decode_completion_text — the `if isinstance(item,` opens just above)
             str):
                parts.append(item)
            elif isinstance(item, dict):
                parts.append(str(item.get("content", "")))
        return "\n".join(part for part in parts if part)
    if isinstance(completion, dict):
        return str(completion.get("content", ""))
    return str(completion)


def _extract_json_blob(text: str) -> Any:
    """Extract and parse the outermost {...} span from free text, or None."""
    candidate = str(text or "").strip()
    if not candidate:
        return None
    left = candidate.find("{")
    right = candidate.rfind("}")
    if left >= 0 and right > left:
        snippet = candidate[left : right + 1]
        try:
            return json.loads(snippet)
        except json.JSONDecodeError:
            return None
    return None


def normalize_answer(text: str) -> str:
    """Canonicalize an answer string: strip quotes, collapse whitespace, drop trailing dots."""
    value = str(text or "").strip()
    value = value.strip('"').strip("'")
    value = re.sub(r"\s+", " ", value)
    value = value.rstrip(".\n ")
    return value


def extract_answer_from_completion(completion_text: str) -> str:
    """Pull an answer out of a completion: JSON `answer` field, then an
    `answer:`/`answer=` line, then the last non-empty line as a fallback."""
    blob = _extract_json_blob(completion_text)
    if isinstance(blob, dict):
        answer = str(blob.get("answer", "")).strip()
        if answer:
            return normalize_answer(answer)

    match = re.search(r"answer\s*[:=]\s*(.+)", completion_text, flags=re.IGNORECASE)
    if match:
        return normalize_answer(match.group(1))

    lines = [line.strip() for line in completion_text.splitlines() if line.strip()]
    if not lines:
        return ""
    return normalize_answer(lines[-1])


@dataclass(slots=True)
class SwarmReplayToolCall:
    # One recorded tool invocation to be replayed against the canonical graph.
    tool_name: str
    args: dict[str, Any] = field(default_factory=dict)
    output: dict[str, Any] = field(default_factory=dict)


@dataclass(slots=True)
class SwarmOrchestratorTelemetry:
    # Orchestrator counters reported by the generating swarm; all non-negative,
    # critical_steps at least 1 (enforced by _parse_orchestrator).
    spawn_count: int = 0
    finished_subtasks: int = 0
    critical_steps: int = 1
    breadth: int = 0
    depth: int = 0


@dataclass(slots=True)
class ReplayValidationResult:
    # Outcome of SwarmV2ReplayValidator.validate for one generated candidate.
    is_valid: bool
    reasons: list[str] = field(default_factory=list)
    duplicate_similarity: float = 0.0
    context_nodes: int = 0
    context_edges: int = 0
    unique_path_count: int = 0
    replayed_question: str = ""
    replayed_answer: str = ""
    replayed_edges: list[Edge] = field(default_factory=list)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to plain JSON-safe types (edges become small dicts)."""
        return {
            "is_valid": self.is_valid,
            "reasons": list(self.reasons),
            "duplicate_similarity": float(self.duplicate_similarity),
            "context_nodes": int(self.context_nodes),
            "context_edges": int(self.context_edges),
            "unique_path_count": int(self.unique_path_count),
            "replayed_question": self.replayed_question,
            "replayed_answer": self.replayed_answer,
            "replayed_edges": [
                {
                    "src": edge.src,
                    "rel": edge.rel,
                    "dst": edge.dst,
                    "confidence": float(edge.confidence),
                }
                for edge in self.replayed_edges
            ],
        }


def _parse_edge_rows(value: Any, max_support_edges: int) -> list[Edge]:
    """Parse at most `max_support_edges` edge dicts into Edge objects.

    Rows missing src/rel/dst are skipped; a bad confidence falls back to 1.0.
    """
    if not isinstance(value, list):
        return []
    out: list[Edge] = []
    for row in value[:max_support_edges]:
        if not isinstance(row, dict):
            continue
        src = str(row.get("src", "")).strip()
        rel = str(row.get("rel", "")).strip()
        dst = str(row.get("dst", "")).strip()
        if not src or not rel or not dst:
            continue
        try:
            confidence = float(row.get("confidence", 1.0))
        except (TypeError, ValueError):
            confidence = 1.0
        out.append(Edge(src=src, rel=rel, dst=dst, confidence=confidence))
    return out


def _parse_tool_trace(value: Any) -> list[SwarmReplayToolCall]:
    """Parse a recorded tool trace; accepts `tool_name` or legacy `tool` keys."""
    if not isinstance(value, list):
        return []
    out: list[SwarmReplayToolCall] = []
    for row in value:
        if not isinstance(row, dict):
            continue
        tool_name = str(row.get("tool_name", row.get("tool", ""))).strip()
        args = row.get("args", {})
        output = row.get("output", {})
        if not tool_name:
            continue
        out.append(
            SwarmReplayToolCall(
                tool_name=tool_name,
                args=dict(args) if isinstance(args, dict) else {},
                output=dict(output) if isinstance(output, dict) else {},
            )
        )
    return out


def _parse_subagent_outputs(value: Any) -> list[str]:
    """Normalize sub-agent outputs (strings or dicts) to non-empty strings."""
    if not isinstance(value, list):
        return []
    out: list[str] = []
    for row in value:
        if isinstance(row, str):
            token = row.strip()
        elif
isinstance(row, dict): + token = str(row.get("content", row.get("summary", ""))).strip() + else: + token = str(row).strip() + if token: + out.append(token) + return out + + +def _parse_orchestrator(value: Any) -> SwarmOrchestratorTelemetry: + if not isinstance(value, dict): + return SwarmOrchestratorTelemetry() + return SwarmOrchestratorTelemetry( + spawn_count=max(0, int(value.get("spawn_count", 0) or 0)), + finished_subtasks=max(0, int(value.get("finished_subtasks", 0) or 0)), + critical_steps=max(1, int(value.get("critical_steps", 1) or 1)), + breadth=max(0, int(value.get("breadth", 0) or 0)), + depth=max(0, int(value.get("depth", 0) or 0)), + ) + + +@dataclass(slots=True) +class GeneratedTaskCandidate: + question: str + answer: str + supporting_edges: list[Edge] + task_type: str + is_valid: bool + tool_trace: list[SwarmReplayToolCall] = field(default_factory=list) + subagent_outputs: list[str] = field(default_factory=list) + canonical_edges: list[Edge] = field(default_factory=list) + canonical_nodes: list[str] = field(default_factory=list) + orchestrator: SwarmOrchestratorTelemetry = field(default_factory=SwarmOrchestratorTelemetry) + validation: dict[str, Any] = field(default_factory=dict) + + + +def parse_generated_task_completion(completion_text: str, max_support_edges: int = 8) -> GeneratedTaskCandidate: + blob = _extract_json_blob(completion_text) + + question = "" + answer = "" + task_type = "adversarial_trace" + supporting_edges: list[Edge] = [] + tool_trace: list[SwarmReplayToolCall] = [] + subagent_outputs: list[str] = [] + canonical_edges: list[Edge] = [] + canonical_nodes: list[str] = [] + orchestrator = SwarmOrchestratorTelemetry() + validation: dict[str, Any] = {} + + if isinstance(blob, dict): + question = str(blob.get("question", "")).strip() + answer = normalize_answer(str(blob.get("answer", "")).strip()) + task_type = str(blob.get("task_type", "adversarial_trace")).strip() or "adversarial_trace" + supporting_edges = 
# (continuation of parse_generated_task_completion — `supporting_edges =` just above)
_parse_edge_rows(blob.get("supporting_edges", []), max_support_edges=max_support_edges)
        tool_trace = _parse_tool_trace(blob.get("tool_trace", []))
        subagent_outputs = _parse_subagent_outputs(blob.get("subagent_outputs", []))
        orchestrator = _parse_orchestrator(blob.get("orchestrator"))
        validation = dict(blob.get("validation", {})) if isinstance(blob.get("validation"), dict) else {}
        canonical_graph = blob.get("canonical_graph", {})
        if isinstance(canonical_graph, dict):
            canonical_nodes = [
                str(node_id).strip()
                for node_id in canonical_graph.get("nodes", [])
                if str(node_id).strip()
            ]
            # The canonical graph may legitimately be larger than the support
            # set, hence the 4x budget.
            canonical_edges = _parse_edge_rows(
                canonical_graph.get("edges", []),
                max_support_edges=max(1, max_support_edges * 4),
            )

    # Fallbacks for missing fields: scrape `question:` lines and reuse the
    # generic answer extractor.
    if not question:
        line_match = re.search(r"question\s*[:=]\s*(.+)", completion_text, flags=re.IGNORECASE)
        if line_match:
            question = line_match.group(1).strip()
    if not answer:
        answer = extract_answer_from_completion(completion_text)

    is_valid = bool(question and answer)
    return GeneratedTaskCandidate(
        question=question,
        answer=answer,
        supporting_edges=supporting_edges,
        task_type=task_type,
        is_valid=is_valid,
        tool_trace=tool_trace,
        subagent_outputs=subagent_outputs,
        canonical_edges=canonical_edges,
        canonical_nodes=canonical_nodes,
        orchestrator=orchestrator,
        validation=validation,
    )


def _token_set(text: str) -> set[str]:
    """Lowercased alphanumeric/underscore token set for similarity checks."""
    return set(re.findall(r"[a-zA-Z0-9_]+", str(text).lower()))


def _jaccard_similarity(left: str, right: str) -> float:
    """Jaccard similarity of two token sets; two empty texts count as identical."""
    a = _token_set(left)
    b = _token_set(right)
    if not a and not b:
        return 1.0
    if not a or not b:
        return 0.0
    return len(a & b) / max(1, len(a | b))


def _distinct_ngram_ratio(texts: list[str], n: int = 2) -> float:
    """Distinct-n diversity metric over the concatenated token stream.

    Returns 1.0 for an empty text list and 0.0 when there are too few tokens
    to form a single n-gram.
    """
    tokens: list[str] = []
    for text in texts:
        tokens.extend(re.findall(r"[a-zA-Z0-9_]+", text.lower()))
    if len(tokens) < n:
        return 0.0 if texts else 1.0
    ngrams = [tuple(tokens[idx : idx + n]) for idx in range(0, len(tokens) - n + 1)]
    if not ngrams:
        return 0.0
    return len(set(ngrams)) / max(1, len(ngrams))


class SwarmV2ReplayValidator:
    """Hard-gated replay validator for deterministic swarm_v2 generation."""

    def __init__(
        self,
        graph: CanonicalGraph,
        validation: SwarmV2ValidationConfig,
        shared_context: SwarmV2SharedContextConfig,
        seen_questions: list[str] | None = None,
    ):
        self.graph = graph
        self.validation = validation
        self.shared_context = shared_context
        # NOTE(review): callers may pass a shared list expecting aliasing, but
        # list() copies it here — remember() updates only this copy. Verify
        # against GeneratorRewardFunction, which passes its own list.
        self.seen_questions = list(seen_questions or [])
        self.graph_nodes = set(graph.nodes.keys())
        self.graph_edges = {(edge.src, edge.rel, edge.dst) for edge in graph.edges}
        # Adjacency index: src node -> outgoing edges, for path counting.
        self.outgoing: dict[str, list[Edge]] = {}
        for edge in graph.edges:
            self.outgoing.setdefault(edge.src, []).append(edge)

    def remember(self, question: str) -> None:
        """Record a question for future duplicate checks, keeping at most ~4k."""
        token = str(question).strip()
        if not token:
            return
        self.seen_questions.append(token)
        if len(self.seen_questions) > 4096:
            # Keep the most recent half when the buffer overflows.
            self.seen_questions = self.seen_questions[-2048:]

    def _count_matching_paths(self, start: str, relations: list[str], answer: str, limit: int = 4) -> int:
        """Count simple paths from `start` following `relations` that end at `answer`.

        DFS with node-level cycle avoidance; stops early once `limit` paths
        are found (used only to test uniqueness, so exact counts beyond the
        limit are unnecessary).
        """
        if not start or not relations:
            return 0

        count = 0
        stack: list[tuple[str, int, tuple[str, ...]]] = [(start, 0, (start,))]
        while stack:
            node_id, rel_idx, seen_nodes = stack.pop()
            if rel_idx >= len(relations):
                if node_id == answer:
                    count += 1
                    if count >= limit:
                        return count
                continue

            relation = relations[rel_idx]
            for edge in self.outgoing.get(node_id, []):
                if edge.rel != relation:
                    continue
                if edge.dst in seen_nodes:
                    continue
                stack.append((edge.dst, rel_idx + 1, seen_nodes + (edge.dst,)))
        return count

    def _replay_tool_trace(self, candidate: GeneratedTaskCandidate) -> tuple[list[str], list[Edge], str, str]:
        """Re-execute the candidate's recorded tool calls against the graph.

        Returns (failure reasons, replayed edges, replayed answer, replayed
        question). An empty trace is immediately non-replayable.
        """
        reasons: list[str] = []
        replayed_edges: list[Edge] = []
        replayed_answer = ""
        replayed_question = ""

        if not candidate.tool_trace:
            return ["non_replayable_tool_calls"], replayed_edges, replayed_answer, replayed_question

for call in candidate.tool_trace: + if call.tool_name == "enumerate_neighbors": + node_id = str(call.args.get("node_id", "")).strip() + expected_edge = call.args.get("expected_edge", {}) + if not node_id: + reasons.append("non_replayable_tool_calls") + continue + neighbors = enumerate_swarm_v2_neighbors(self.graph, node_id) + if not neighbors: + reasons.append("non_replayable_tool_calls") + if isinstance(expected_edge, dict): + expected_key = ( + str(expected_edge.get("src", "")).strip(), + str(expected_edge.get("rel", "")).strip(), + str(expected_edge.get("dst", "")).strip(), + ) + if expected_key not in {(edge.src, edge.rel, edge.dst) for edge in neighbors}: + reasons.append("non_replayable_tool_calls") + elif call.tool_name == "trace_path": + candidate_path = call.args.get("path", candidate.supporting_edges) + replayed_edges = trace_swarm_v2_path(self.graph, candidate_path) + if not replayed_edges: + reasons.append("non_replayable_tool_calls") + elif call.tool_name == "select_answer": + replayed_answer = select_swarm_v2_answer(replayed_edges) + if not replayed_answer: + reasons.append("non_replayable_tool_calls") + elif call.tool_name == "emit_question": + replayed_question = emit_swarm_v2_question(replayed_edges) + if not replayed_question: + reasons.append("non_replayable_tool_calls") + else: + reasons.append("non_replayable_tool_calls") + + return reasons, replayed_edges, replayed_answer, replayed_question + + def validate(self, candidate: GeneratedTaskCandidate) -> ReplayValidationResult: + reasons: list[str] = [] + + if not candidate.question or not candidate.answer: + reasons.append("missing_question_or_answer") + + if not candidate.supporting_edges: + reasons.append("malformed_support_edges") + + if len(candidate.supporting_edges) > self.validation.max_support_edges: + reasons.append("context_or_support_budget_overflow") + + edge_keys = [(edge.src, edge.rel, edge.dst) for edge in candidate.supporting_edges] + if len(set(edge_keys)) != len(edge_keys): + 
reasons.append("malformed_support_edges") + + for edge in candidate.supporting_edges: + if edge.src not in self.graph_nodes or edge.dst not in self.graph_nodes: + reasons.append("unseen_nodes_or_edges") + break + if (edge.src, edge.rel, edge.dst) not in self.graph_edges: + reasons.append("unseen_nodes_or_edges") + break + + replay_reasons, replayed_edges, replayed_answer, replayed_question = self._replay_tool_trace(candidate) + reasons.extend(replay_reasons) + + if replayed_edges: + expected_keys = [(edge.src, edge.rel, edge.dst) for edge in replayed_edges] + if expected_keys != edge_keys: + reasons.append("non_replayable_tool_calls") + relations = [edge.rel for edge in replayed_edges] + unique_path_count = self._count_matching_paths( + start=replayed_edges[0].src, + relations=relations, + answer=replayed_answer or candidate.answer, + ) + else: + unique_path_count = 0 + + if unique_path_count != 1: + reasons.append("non_unique_derivation_path") + + if replayed_answer and normalize_answer(replayed_answer) != normalize_answer(candidate.answer): + reasons.append("non_replayable_tool_calls") + + if replayed_question and replayed_question != candidate.question: + reasons.append("non_replayable_tool_calls") + + if candidate.answer and normalize_answer(candidate.answer).lower() in candidate.question.lower(): + reasons.append("answer_leakage") + + duplicate_similarity = 0.0 + if candidate.question and self.seen_questions: + duplicate_similarity = max( + _jaccard_similarity(candidate.question, seen_question) + for seen_question in self.seen_questions + ) + if duplicate_similarity >= self.validation.duplicate_similarity_threshold: + reasons.append("duplicate_or_near_duplicate") + + context_nodes = len({edge.src for edge in candidate.supporting_edges} | {edge.dst for edge in candidate.supporting_edges}) + context_edges = len(candidate.supporting_edges) + max_context_nodes = min(self.validation.max_context_nodes, self.shared_context.max_nodes) + max_context_edges = 
min(self.validation.max_context_edges, self.shared_context.max_edges) + if context_nodes > max_context_nodes or context_edges > max_context_edges: + reasons.append("context_or_support_budget_overflow") + + if len(candidate.supporting_edges) > self.validation.max_path_hops: + reasons.append("context_or_support_budget_overflow") + + return ReplayValidationResult( + is_valid=not reasons, + reasons=sorted(set(reasons)), + duplicate_similarity=duplicate_similarity, + context_nodes=context_nodes, + context_edges=context_edges, + unique_path_count=unique_path_count, + replayed_question=replayed_question, + replayed_answer=replayed_answer, + replayed_edges=replayed_edges, + ) + + +class AnswererJudge: + """Lightweight frozen answerer used to score adversarial hardness.""" + + def __init__(self, model_name_or_path: str, max_new_tokens: int = 48): + self.model_name_or_path = model_name_or_path + self.max_new_tokens = max_new_tokens + self._model = None + self._tokenizer = None + + def _ensure_loaded(self) -> None: + if self._model is not None and self._tokenizer is not None: + return + + import torch + from transformers import AutoModelForCausalLM, AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path) + if tokenizer.pad_token is None and tokenizer.eos_token is not None: + tokenizer.pad_token = tokenizer.eos_token + + model_kwargs: dict[str, Any] = {} + if torch.cuda.is_available(): + model_kwargs["device_map"] = "auto" + model_kwargs["torch_dtype"] = torch.bfloat16 + + model = AutoModelForCausalLM.from_pretrained(self.model_name_or_path, **model_kwargs) + model.eval() + + self._model = model + self._tokenizer = tokenizer + + @lru_cache(maxsize=2048) + def answer(self, question: str) -> str: + self._ensure_loaded() + assert self._model is not None + assert self._tokenizer is not None + + import torch + + prompt = ( + "You are an OSINT answering model. 
" + "Answer with only the final entity string.\n" + f"Question: {question}\n" + "Answer:" + ) + + tokenizer = self._tokenizer + model = self._model + encoded = tokenizer(prompt, return_tensors="pt") + device = next(model.parameters()).device + encoded = {k: v.to(device) for k, v in encoded.items()} + + with torch.no_grad(): + output = model.generate( + **encoded, + max_new_tokens=max(1, int(self.max_new_tokens)), + do_sample=False, + temperature=0.0, + pad_token_id=tokenizer.eos_token_id, + ) + + generated = output[0][encoded["input_ids"].shape[1] :] + completion = tokenizer.decode(generated, skip_special_tokens=True) + return normalize_answer(extract_answer_from_completion(completion)) + + +class GeneratorRewardFunction: + """Reward for the graph/question generation swarm in adversarial self-play.""" + + def __init__( + self, + graph: CanonicalGraph, + answerer_judge: AnswererJudge, + weights: GeneratorRewardWeights, + max_support_edges: int = 8, + pipeline_mode: str = "legacy", + swarm_v2_validation: SwarmV2ValidationConfig | None = None, + swarm_v2_shared_context: SwarmV2SharedContextConfig | None = None, + parl_max_parallel_hint: int = 0, + ): + self.graph = graph + self.answerer_judge = answerer_judge + self.weights = weights + self.max_support_edges = max_support_edges + self.pipeline_mode = str(pipeline_mode).strip().lower() or "legacy" + self.graph_nodes = set(graph.nodes.keys()) + self.graph_edges = {(edge.src, edge.rel, edge.dst) for edge in graph.edges} + self._seen_questions: list[str] = [] + self.swarm_v2_validation = swarm_v2_validation or SwarmV2ValidationConfig( + max_support_edges=max_support_edges + ) + self.swarm_v2_shared_context = swarm_v2_shared_context or SwarmV2SharedContextConfig() + self.parl_max_parallel_hint = max(0, int(parl_max_parallel_hint or 0)) + self._swarm_v2_validator = SwarmV2ReplayValidator( + graph=graph, + validation=self.swarm_v2_validation, + shared_context=self.swarm_v2_shared_context, + 
            seen_questions=self._seen_questions,
        )
        # Debug/telemetry state: rolling reward window and per-reason counters
        # consumed only by the periodic [reward_debug] logging in __call__.
        self._debug_batches_seen = 0
        self._debug_reason_counter: Counter[str] = Counter()
        self._debug_reward_window: list[float] = []
        self._debug_last_batch: dict[str, Any] = {}

    @staticmethod
    def _std(values: list[float]) -> float:
        """Population standard deviation of `values` (0.0 for 0 or 1 samples)."""
        if len(values) <= 1:
            return 0.0
        mean = sum(values) / len(values)
        variance = sum((value - mean) ** 2 for value in values) / len(values)
        return variance ** 0.5

    def _invalid_swarm_v2_reward(
        self,
        candidate: GeneratedTaskCandidate,
        validation_result: ReplayValidationResult,
    ) -> float:
        """Graded negative reward for candidates that failed replay validation.

        Combines a base penalty plus per-reason penalties (below) with partial
        credit for parseable structure (continued past this view), so invalid
        samples still produce reward variance for GRPO advantages.
        """
        # Avoid a constant hard penalty. Keep invalid samples negative but
        # graded so GRPO still gets reward variance/advantages when quality
        # differs. Scale is intentionally wider than the original [-1.35]
        # constant path:
        #   malformed/no JSON        ~= -2.0
        #   partial structured JSON  ~= -1.2 .. -0.4
        #   replayable but imperfect candidates are handled by valid path.
        reason_penalty = {
            "missing_question_or_answer": 0.55,
            "malformed_support_edges": 0.40,
            "non_replayable_tool_calls": 0.55,
            "non_unique_derivation_path": 0.30,
            "unseen_nodes_or_edges": 0.35,
            "answer_leakage": 0.45,
            "duplicate_or_near_duplicate": 0.20,
            "context_or_support_budget_overflow": 0.25,
        }
        # Base penalty applied to every invalid candidate; unknown reasons add
        # a small default increment.
        penalty = 0.35
        for reason in validation_result.reasons:
            penalty += reason_penalty.get(reason, 0.10)

        # Partial credit for parseable structure to reduce flat rewards.
        partial_credit = 0.0
        if candidate.question:
            partial_credit += 0.25
        if candidate.answer:
            partial_credit += 0.25
        if candidate.supporting_edges:
            partial_credit += min(0.35, 0.08 * len(candidate.supporting_edges))
        if candidate.tool_trace:
            partial_credit += min(0.30, 0.06 * len(candidate.tool_trace))
        if candidate.subagent_outputs:
            partial_credit += 0.10
        if candidate.canonical_edges or candidate.canonical_nodes:
            partial_credit += 0.10

        # Invalid candidates always land in [-2.0, -0.05]: negative, but graded.
        reward = partial_credit - penalty
        return float(max(-2.0, min(-0.05, reward)))

    def _validity_score(self, candidate: GeneratedTaskCandidate) -> float:
        """Structural score in [0, 1]: question + answer present, edges within budget."""
        score = 0.0
        if candidate.question:
            score += 0.4
        if candidate.answer:
            score += 0.4
        if len(candidate.supporting_edges) <= self.max_support_edges:
            score += 0.2
        return min(1.0, score)

    def _consistency_score(self, candidate: GeneratedTaskCandidate) -> float:
        """Score how well the candidate is grounded in the canonical graph.

        Weighted mix of: fraction of supporting edges that exist in the graph,
        whether the answer appears in the graph or in the support edges, and
        whether the question mentions any known node id (substring match).
        """
        if not candidate.question or not candidate.answer:
            return 0.0

        edge_consistency = 0.0
        if candidate.supporting_edges:
            matches = sum(
                1
                for edge in candidate.supporting_edges
                if (edge.src, edge.rel, edge.dst) in self.graph_edges
            )
            edge_consistency = matches / max(1, len(candidate.supporting_edges))

        answer_in_graph = 1.0 if candidate.answer in self.graph_nodes else 0.0
        answer_in_edges = 1.0 if any(
            candidate.answer in {edge.src, edge.dst} for edge in candidate.supporting_edges
        ) else 0.0

        question_mentions_graph_symbol = 1.0 if any(
            node_id in candidate.question for node_id in self.graph_nodes
        ) else 0.0

        return (
            0.45 * edge_consistency
            + 0.30 * max(answer_in_graph, answer_in_edges)
            + 0.25 * question_mentions_graph_symbol
        )

    def _diversity_score(self, question: str) -> float:
        """1 minus the max Jaccard similarity against previously seen questions."""
        if not self._seen_questions:
            return 1.0
        max_similarity = max(_jaccard_similarity(question, prior) for prior in self._seen_questions)
        return max(0.0, 1.0 - max_similarity)

    def _hardness_score(self, candidate: GeneratedTaskCandidate) -> float:
        """+1.0 if the frozen answerer judge fails the question, -0.4 if it succeeds."""
        if not candidate.is_valid:
            return -1.0
        predicted_answer = normalize_answer(self.answerer_judge.answer(candidate.question))
        target_answer = normalize_answer(candidate.answer)
        return 1.0 if predicted_answer != target_answer else -0.4

    @staticmethod
    def _support_path_coverage(candidate: GeneratedTaskCandidate) -> float:
        """Ratio of distinct (src, rel, dst) keys to total support edges (penalizes duplicates)."""
        if not candidate.supporting_edges:
            return 0.0
        keys = {(edge.src, edge.rel, edge.dst) for edge in candidate.supporting_edges}
        return len(keys) / max(1, len(candidate.supporting_edges))

    def _swarm_diversity_score(self, candidate: GeneratedTaskCandidate) -> float:
        """Blend of distinct-bigram ratio over subagent outputs and path coverage, in [0, 1]."""
        if not candidate.subagent_outputs:
            return 0.0
        distinct_ratio = _distinct_ngram_ratio(candidate.subagent_outputs, n=2)
        path_coverage = self._support_path_coverage(candidate)
        return max(0.0, min(1.0, (0.7 * distinct_ratio) + (0.3 * path_coverage)))

    def _context_pressure_score(self, validation_result: ReplayValidationResult) -> float:
        """Reward shared-context utilization near the configured target pressure.

        Peaks (1.0) when max(node, edge) utilization equals `target_pressure`;
        decays linearly to 0 at utilization 0 or 1, and is 0 when the context
        budget is exceeded or the candidate is invalid.
        """
        if not validation_result.is_valid:
            return 0.0

        node_util = validation_result.context_nodes / max(1, self.swarm_v2_shared_context.max_nodes)
        edge_util = validation_result.context_edges / max(1, self.swarm_v2_shared_context.max_edges)
        utilization = max(node_util, edge_util)
        target = max(0.05, float(self.swarm_v2_shared_context.target_pressure))
        if utilization > 1.0:
            return 0.0
        gap = abs(utilization - target)
        return max(0.0, 1.0 - (gap / max(target, 1.0 - target)))

    def _parl_scores(self, candidate: GeneratedTaskCandidate) -> tuple[float, float]:
        """Return the (parallel, finish) shaping terms from the PARL reward breakdown."""
        breakdown = parl_reward_breakdown(
            task_outcome_reward=0.0,
            spawn_count=candidate.orchestrator.spawn_count,
            finished_subtasks=candidate.orchestrator.finished_subtasks,
            critical_steps=candidate.orchestrator.critical_steps,
            lambda_parallel=0.15,
            lambda_finish=0.20,
            anneal=1.0,
            breadth=candidate.orchestrator.breadth,
            depth=candidate.orchestrator.depth,
            max_parallel_hint=self.parl_max_parallel_hint,
        )
        return breakdown.parallel, breakdown.finish

    def _swarm_v2_reward(self, candidate: GeneratedTaskCandidate) -> tuple[float, ReplayValidationResult]:
        """Score a swarm_v2 candidate: graded penalty if invalid, weighted sum if valid."""
        validator = self._swarm_v2_validator
        # Hand the validator a snapshot of seen questions for duplicate checks.
        validator.seen_questions = list(self._seen_questions)
        validation_result = validator.validate(candidate)
        if not validation_result.is_valid:
            return self._invalid_swarm_v2_reward(candidate, validation_result), validation_result

        hardness = self._hardness_score(candidate)
        swarm_diversity = self._swarm_diversity_score(candidate)
        context_pressure = self._context_pressure_score(validation_result)
        parl_parallel, parl_finish = self._parl_scores(candidate)

        reward = (
            0.25  # valid JSON/schema
            + 0.30  # replayable derivation
            + (0.30 * hardness)
            + (0.15 * swarm_diversity)
            + (0.10 * context_pressure)
            + (0.025 * parl_parallel)
            + (0.025 * parl_finish)
        )
        return reward, validation_result

    def __call__(
        self,
        prompts: list[Any] | None = None,
        completions: list[Any] | None = None,
        **kwargs: Any,
    ) -> list[float]:
        # TRL reward-function entry point: one float reward per completion.
        del prompts
        if completions is None:
            completions = list(kwargs.get("completions", []))
        rewards: list[float] = []
        batch_reasons: Counter[str] = Counter()
        valid_count = 0
        for completion in completions:
            text = decode_completion_text(completion)
            candidate = parse_generated_task_completion(text, max_support_edges=self.max_support_edges)

            if self.pipeline_mode == "swarm_v2":
                reward, validation_result = self._swarm_v2_reward(candidate)
                # Final reward is clipped to [-2.0, 1.2] in both pipelines.
                rewards.append(float(max(-2.0, min(1.2, reward))))
                if validation_result.is_valid and candidate.question:
                    valid_count += 1
                    self._seen_questions.append(candidate.question)
                    # Bound memory: keep only the most recent 2048 questions.
                    if len(self._seen_questions) > 4096:
                        self._seen_questions = self._seen_questions[-2048:]
                else:
                    for reason in validation_result.reasons:
                        batch_reasons[reason] += 1
            else:
                # Legacy pipeline: weighted mix of structural scores.
                validity = self._validity_score(candidate)
                consistency = self._consistency_score(candidate)
                diversity = self._diversity_score(candidate.question) if candidate.question else 0.0
                hardness = self._hardness_score(candidate)

                reward = (
                    self.weights.validity * validity
                    + self.weights.hardness * hardness
                    + self.weights.diversity * diversity
                    + self.weights.consistency * consistency
                )
                rewards.append(float(max(-2.0, min(1.2, reward))))

            if self.pipeline_mode != "swarm_v2" and candidate.question:
                self._seen_questions.append(candidate.question)
                if len(self._seen_questions) > 4096:
                    self._seen_questions = self._seen_questions[-2048:]

        # Telemetry: rolling reward window plus a proxy for per-batch
        # advantages (reward minus batch mean), used by periodic debug logs.
        self._debug_batches_seen += 1
        self._debug_reward_window.extend(rewards)
        self._debug_reward_window = self._debug_reward_window[-512:]
        self._debug_reason_counter.update(batch_reasons)
        batch_mean = float(sum(rewards) / max(1, len(rewards)))
        batch_std = float(self._std(rewards))
        advantages = [float(value - batch_mean) for value in rewards]
        self._debug_last_batch = {
            "batch_rewards": list(rewards),
            "batch_reward_mean": batch_mean,
            "batch_reward_std": batch_std,
            "advantage_proxy_min": min(advantages) if advantages else 0.0,
            "advantage_proxy_max": max(advantages) if advantages else 0.0,
            "advantage_proxy_std": float(self._std(advantages)),
            "valid_count": int(valid_count),
            "invalid_count": int(max(0, len(rewards) - valid_count)),
            "valid_output_ratio": float(valid_count / max(1, len(rewards))),
            "top_invalid_reasons": batch_reasons.most_common(5),
        }
        # Every 10th batch in swarm_v2 mode, print a compact debug summary.
        if self.pipeline_mode == "swarm_v2" and (self._debug_batches_seen % 10 == 0):
            window_std = self._std(self._debug_reward_window)
            print(
                "[reward_debug][generator] "
                f"batches={self._debug_batches_seen} "
                f"window_reward_std={window_std:.6f} "
                f"last_batch_valid={valid_count}/{len(rewards)} "
                f"top_invalid_reasons={batch_reasons.most_common(3)}"
            )

        return rewards


class AnswererRewardFunction:
    """Answer-swarm reward wrapper that reuses the environment answer reward logic."""

    def __init__(
        self,
        graph: CanonicalGraph,
        pipeline_mode: str = "legacy",
        parl_max_parallel_hint: int = 0,
    ):
        self.reward_model = build_reward_model(graph)
        self.pipeline_mode = str(pipeline_mode).strip().lower() or "legacy"
        self.parl_max_parallel_hint = max(0, int(parl_max_parallel_hint or 0))

    @staticmethod
    def _parse_support_edges(value: Any) -> list[Edge]:
        """Parse a JSON string or list of edge dicts into Edge objects.

        Rows missing src/rel/dst are skipped; a non-numeric confidence falls
        back to 1.0. Malformed JSON yields an empty list.
        """
        payload = value
        if isinstance(value, str):
            try:
                payload = json.loads(value)
            except json.JSONDecodeError:
                payload = []

        out: list[Edge] = []
        if not isinstance(payload, list):
            return out
        for row in payload:
            if not isinstance(row, dict):
                continue
            src = str(row.get("src", "")).strip()
            rel = str(row.get("rel", "")).strip()
            dst = str(row.get("dst", "")).strip()
            if not src or not rel or not dst:
                continue
            try:
                confidence = float(row.get("confidence", 1.0))
            except (TypeError, ValueError):
                confidence = 1.0
            out.append(Edge(src=src, rel=rel, dst=dst, confidence=confidence))
        return out

    @staticmethod
    def _value_at(column: Any, index: int, default: Any) -> Any:
        """Safe positional lookup into a dataset column; `default` when absent."""
        if isinstance(column, list) and index < len(column):
            return column[index]
        return default

    @staticmethod
    def _extract_predicted_edges(completion_text: str, support_edges: list[Edge]) -> list[Edge]:
        """Recover the edges a completion claims to have used.

        Prefers structured `supporting_edges` from an embedded JSON blob;
        otherwise falls back to case-insensitive substring matching of each
        ground-truth support edge against the completion text.
        """
        blob = _extract_json_blob(completion_text)
        if isinstance(blob, dict):
            structured_edges = _parse_edge_rows(blob.get("supporting_edges", []), max_support_edges=len(support_edges))
            if structured_edges:
                return structured_edges
        text = completion_text.lower()
        matched: list[Edge] = []
        for edge in support_edges:
            if edge.src.lower() in text and edge.rel.lower() in text and edge.dst.lower() in text:
                matched.append(edge)
        return matched

    def _extract_orchestrator_reward(self, completion_text: str, base_reward: float) -> float:
        """In swarm_v2 mode, fold PARL orchestrator shaping into the base reward.

        Legacy mode returns `base_reward` unchanged. Missing/malformed
        orchestrator telemetry falls back to default SwarmOrchestratorTelemetry.
        """
        if self.pipeline_mode != "swarm_v2":
            return float(base_reward)
        blob = _extract_json_blob(completion_text)
        orchestrator = _parse_orchestrator(blob.get("orchestrator")) if isinstance(blob, dict) else SwarmOrchestratorTelemetry()
        breakdown = parl_reward_breakdown(
            task_outcome_reward=base_reward,
            spawn_count=orchestrator.spawn_count,
            finished_subtasks=orchestrator.finished_subtasks,
            critical_steps=orchestrator.critical_steps,
            lambda_parallel=0.15,
            lambda_finish=0.20,
            anneal=1.0,
            breadth=orchestrator.breadth,
            depth=orchestrator.depth,
            max_parallel_hint=self.parl_max_parallel_hint,
        )
        return float(breakdown.total)

    def __call__(
        self,
        prompts: list[Any],
        completions: list[Any],
        answer: list[Any] | None = None,
        question: list[Any] | None = None,
        supporting_edges_json: list[Any] | None = None,
        difficulty: list[Any] | None = None,
        **kwargs: Any,
    ) -> list[float]:
        # TRL reward-function entry point: extra dataset columns arrive as
        # keyword lists aligned with `completions`.
        rewards: list[float] = []

        for idx, completion in enumerate(completions):
            completion_text = decode_completion_text(completion)
            predicted_answer = extract_answer_from_completion(completion_text)

            target_answer = normalize_answer(str(self._value_at(answer, idx, "")))
            question_text = str(self._value_at(question, idx, "")).strip()
            if not question_text:
                # Fall back to the raw prompt when no question column exists.
                question_text = str(self._value_at(prompts, idx, "")).strip()

            support_payload = self._value_at(supporting_edges_json, idx, [])
            support_edges = self._parse_support_edges(support_payload)
            difficulty_level = str(self._value_at(difficulty, idx, "hard")).strip() or "hard"

            # Rebuild a synthetic TaskInstance so the shared environment
            # reward model can score this completion.
            task = TaskInstance(
                task_id=f"train_task_{idx}",
                task_type="adversarial_trace",
                question=question_text,
                answer=target_answer,
                supporting_edges=support_edges,
                metadata={"difficulty": difficulty_level},
            )
            pred_edges = self._extract_predicted_edges(completion_text, support_edges)
            breakdown = compute_answer_reward(
                proposed_answer=predicted_answer,
                task=task,
                pred_edges=pred_edges,
                tool_outputs=[],
                step_count=1,
                model=self.reward_model,
                difficulty=difficulty_level,
            )
            rewards.append(self._extract_orchestrator_reward(completion_text, breakdown.total))

        return rewards
diff --git a/src/osint_env/training/self_play.py b/src/osint_env/training/self_play.py new 
file mode 100644 index 0000000000000000000000000000000000000000..e63820a734c8eb31b68e2940a778e3d8ed35e7e0 --- /dev/null +++ b/src/osint_env/training/self_play.py @@ -0,0 +1,1665 @@ +from __future__ import annotations + +import inspect +import json +import os +from dataclasses import dataclass +from pathlib import Path +import random +from typing import Any + +from osint_env.data.generator import ( + build_swarm_v2_canonical_subgraph, + build_swarm_v2_path_candidates, + build_swarm_v2_tool_trace, + emit_swarm_v2_question, + select_swarm_v2_answer, + trace_swarm_v2_path, +) +from osint_env.domain.models import Edge, EnvironmentConfig, TaskInstance +from osint_env.env.environment import OSINTEnvironment +from osint_env.llm import build_llm_client +from osint_env.training.config import ( + KimiGRPOPhaseConfig, + LoraTuningConfig, + SelfPlayTrainingConfig, + SwarmV2SwarmConfig, +) +from osint_env.training.rewards import ( + AnswererJudge, + AnswererRewardFunction, + GeneratorRewardFunction, + SwarmV2ReplayValidator, + decode_completion_text, + parse_generated_task_completion, +) + + +@dataclass(slots=True) +class _RoundArtifacts: + round_index: int + generator_dataset_path: str + answerer_dataset_path: str + generated_tasks_path: str + + + +def _require_training_stack() -> tuple[Any, Any, Any]: + try: + from datasets import Dataset + from trl import GRPOConfig, GRPOTrainer + except ImportError as exc: + raise RuntimeError( + "Training stack is missing. 
Install train dependencies first: " + "python -m pip install -e .[train]" + ) from exc + return Dataset, GRPOConfig, GRPOTrainer + + + +def _task_to_edge_json(task: TaskInstance) -> str: + payload = [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in task.supporting_edges + ] + return json.dumps(payload, sort_keys=True) + + +def _edge_payload(edge: Edge) -> dict[str, Any]: + return { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + + +def _edges_from_payload(rows: Any, max_edges: int) -> list[Edge]: + if not isinstance(rows, list): + return [] + edges: list[Edge] = [] + for row in rows[:max_edges]: + if not isinstance(row, dict): + continue + src = str(row.get("src", "")).strip() + rel = str(row.get("rel", "")).strip() + dst = str(row.get("dst", "")).strip() + if not src or not rel or not dst: + continue + try: + confidence = float(row.get("confidence", 1.0)) + except (TypeError, ValueError): + confidence = 1.0 + edges.append(Edge(src=src, rel=rel, dst=dst, confidence=confidence)) + return edges + + + +def _canonical_example_payload( + graph: Any, + canonical_candidate: dict[str, Any], + swarm_cfg: SwarmV2SwarmConfig, +) -> dict[str, Any]: + candidate_edges = _edges_from_payload(canonical_candidate.get("edges", []), max_edges=4) + traced_edges = trace_swarm_v2_path(graph, candidate_edges) or candidate_edges + if not traced_edges: + return { + "canonical_graph": canonical_candidate, + "question": "Which entity is reached by following the provided replayable relation path?", + "answer": "", + "task_type": "swarm_v2_trace", + "supporting_edges": [], + "tool_trace": [], + "subagent_outputs": ["path_agent: no replayable edge available"], + "orchestrator": { + "spawn_count": 1, + "finished_subtasks": 1, + "critical_steps": 1, + "breadth": 1, + "depth": 1, + }, + } + + spawn_count = min(swarm_cfg.max_agents, max(1, len(traced_edges) + 1)) + return { + 
"canonical_graph": canonical_candidate, + "question": emit_swarm_v2_question(traced_edges), + "answer": select_swarm_v2_answer(traced_edges), + "task_type": f"swarm_v2_{len(traced_edges)}hop_trace", + "supporting_edges": [_edge_payload(edge) for edge in traced_edges], + "tool_trace": build_swarm_v2_tool_trace(graph, traced_edges), + "subagent_outputs": [ + f"path_agent_{idx}: {edge.src} --{edge.rel}--> {edge.dst}" + for idx, edge in enumerate(traced_edges) + ] + + ["question_agent: emitted deterministic relation-path question"], + "orchestrator": { + "spawn_count": spawn_count, + "finished_subtasks": spawn_count, + "critical_steps": max(1, len(traced_edges)), + "breadth": min(swarm_cfg.max_breadth, spawn_count), + "depth": min(swarm_cfg.max_depth, 1 if len(traced_edges) <= 2 else 2), + }, + } + + +def _difficulty_for_task(task: TaskInstance) -> str: + metadata = dict(task.metadata or {}) + token = str(metadata.get("difficulty", "")).strip().lower() + if token in {"easy", "medium", "hard"}: + return token + if task.task_type.startswith("metaqa_1-hop"): + return "easy" + if task.task_type.startswith("metaqa_2-hop"): + return "medium" + return "hard" + + + +def _answer_prompt(question: str) -> str: + return ( + "You are the answer-generation swarm for an OSINT graph task.\n" + "Return ONLY one compact JSON object. Do not use markdown. Do not add prose.\n" + "Required schema: {\"answer\": \"\"}\n" + "Valid example: {\"answer\": \"user_7\"}\n" + f"Question: {question}" + ) + + +def _swarm_v2_answer_prompt( + question: str, + shared_context: dict[str, Any], + swarm_cfg: SwarmV2SwarmConfig, +) -> str: + return ( + "You are the trainable orchestrator for the OSINT answer-generation swarm.\n" + "Assume frozen subagents share the same context window by default.\n" + f"Max agents: {swarm_cfg.max_agents}. " + f"Max breadth: {swarm_cfg.max_breadth}. " + f"Max depth: {swarm_cfg.max_depth}. " + f"Planner rounds: {swarm_cfg.planner_rounds}. 
" + f"Tools per agent: {swarm_cfg.tools_per_agent}.\n" + "Return ONLY one compact JSON object. Do not use markdown. Do not add prose.\n" + "Required schema: {\"answer\": string, \"supporting_edges\": list, \"orchestrator\": object}.\n" + "orchestrator must contain integer keys: spawn_count, finished_subtasks, critical_steps, breadth, depth.\n" + "supporting_edges must use only edges from Shared context and each edge must contain src, rel, dst, confidence.\n" + "Valid example: {\"answer\":\"user_7\",\"supporting_edges\":[{\"src\":\"alias_7_123\",\"rel\":\"alias_of\",\"dst\":\"user_7\",\"confidence\":1.0}],\"orchestrator\":{\"spawn_count\":2,\"finished_subtasks\":2,\"critical_steps\":1,\"breadth\":2,\"depth\":1}}\n" + f"Shared context:\n{json.dumps(shared_context, sort_keys=True)}\n" + f"Question: {question}" + ) + + + +def _build_answerer_rows(tasks: list[TaskInstance]) -> list[dict[str, Any]]: + rows: list[dict[str, Any]] = [] + for task in tasks: + rows.append( + { + "prompt": _answer_prompt(task.question), + "question": task.question, + "answer": str(task.answer), + "supporting_edges_json": _task_to_edge_json(task), + "difficulty": _difficulty_for_task(task), + "task_type": task.task_type, + "task_id": task.task_id, + } + ) + return rows + + +def _build_swarm_v2_answerer_rows( + env: OSINTEnvironment, + tasks: list[TaskInstance], + cfg: SelfPlayTrainingConfig, +) -> list[dict[str, Any]]: + rows: list[dict[str, Any]] = [] + for task in tasks: + metadata = dict(task.metadata or {}) + canonical_graph = metadata.get("canonical_graph") + if isinstance(canonical_graph, dict): + shared_context = { + "nodes": list(canonical_graph.get("nodes", []))[: cfg.swarm_v2.shared_context.max_nodes], + "edges": list(canonical_graph.get("edges", []))[: cfg.swarm_v2.shared_context.max_edges], + } + else: + deterministic_seed = sum(ord(ch) for ch in task.task_id) + shared_context = _graph_context_for_prompt( + env=env, + max_nodes=cfg.swarm_v2.shared_context.max_nodes, + 
max_edges=cfg.swarm_v2.shared_context.max_edges, + rng=random.Random(deterministic_seed), + ) + + rows.append( + { + "prompt": _swarm_v2_answer_prompt( + question=task.question, + shared_context=shared_context, + swarm_cfg=cfg.swarm_v2.answerer_swarm, + ), + "question": task.question, + "answer": str(task.answer), + "supporting_edges_json": _task_to_edge_json(task), + "difficulty": _difficulty_for_task(task), + "task_type": task.task_type, + "task_id": task.task_id, + } + ) + return rows + + + +def _graph_context_for_prompt( + env: OSINTEnvironment, + max_nodes: int, + max_edges: int, + rng: random.Random, +) -> dict[str, Any]: + node_ids = sorted(env.graph.nodes.keys()) + if len(node_ids) > max_nodes: + node_ids = rng.sample(node_ids, k=max_nodes) + + edges = list(env.graph.edges) + if len(edges) > max_edges: + edges = rng.sample(edges, k=max_edges) + + return { + "nodes": node_ids, + "edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + } + for edge in edges + ], + } + + + +def _generator_prompt(context_blob: dict[str, Any], anchor_questions: list[str]) -> str: + anchors = "\n".join(f"- {question}" for question in anchor_questions) + return ( + "You are the adversarial question-and-graph generation swarm in self-play.\n" + "Generate one challenging but answerable OSINT task that makes answering difficult.\n" + "Use only entities and relations from the provided graph context.\n" + "Prefer multi-hop traces and avoid duplicates of the anchor questions.\n" + "Return strict JSON with keys: question, answer, task_type, supporting_edges.\n" + "supporting_edges must be a list of objects with src, rel, dst, confidence.\n" + "Graph context:\n" + f"{json.dumps(context_blob, sort_keys=True)}\n" + "Anchor questions to avoid:\n" + f"{anchors}\n" + ) + + + +def _build_generator_rows( + env: OSINTEnvironment, + cfg: SelfPlayTrainingConfig, + rng: random.Random, +) -> list[dict[str, Any]]: + rows: list[dict[str, Any]] = [] + existing_questions = [task.question 
for task in env.tasks] + + for _ in range(max(1, cfg.generator_prompts_per_round)): + context_blob = _graph_context_for_prompt( + env=env, + max_nodes=cfg.max_graph_context_nodes, + max_edges=cfg.max_graph_context_edges, + rng=rng, + ) + anchor_sample_size = min(5, len(existing_questions)) + anchor_sample = rng.sample(existing_questions, k=anchor_sample_size) if anchor_sample_size > 0 else [] + rows.append( + { + "prompt": _generator_prompt(context_blob, anchor_sample), + } + ) + return rows + + +def _swarm_v2_generator_prompt( + graph: Any, + shared_context: dict[str, Any], + canonical_candidate: dict[str, Any], + anchor_questions: list[str], + swarm_cfg: SwarmV2SwarmConfig, + canonical_graph_mode: str, +) -> str: + anchors = "\n".join(f"- {question}" for question in anchor_questions) + canonical_mode = str(canonical_graph_mode).strip().lower() or "generate" + example_payload = _canonical_example_payload(graph, canonical_candidate, swarm_cfg) + canonical_instruction = ( + "You may propose canonical_graph updates when they improve replayability and keep it graph-grounded." + if canonical_mode == "generate" + else "Reuse the provided canonical candidate as-is; do not add, remove, or modify canonical_graph nodes/edges." + ) + return ( + "You are the trainable orchestrator for the adversarial OSINT question-generation swarm.\n" + "Coordinate frozen subagents over the shared context and return a replayable task.\n" + f"Max agents: {swarm_cfg.max_agents}. " + f"Max breadth: {swarm_cfg.max_breadth}. " + f"Max depth: {swarm_cfg.max_depth}. " + f"Planner rounds: {swarm_cfg.planner_rounds}. " + f"Tools per agent: {swarm_cfg.tools_per_agent}.\n" + "Return ONLY one compact JSON object. Do not use markdown fences. 
Do not add commentary.\n" + "Required top-level keys: canonical_graph, question, answer, task_type, supporting_edges, " + "tool_trace, subagent_outputs, orchestrator.\n" + "supporting_edges must be a non-empty list of objects with src, rel, dst, confidence.\n" + "tool_trace must be non-empty and may only use enumerate_neighbors, trace_path, select_answer, emit_question.\n" + "orchestrator must contain integer keys: spawn_count, finished_subtasks, critical_steps, breadth, depth.\n" + "The answer must be the final dst selected by the replayed relation path.\n" + "The question must exactly match the deterministic emit_question result derived from the replayed path.\n" + f"Canonical graph mode: {canonical_mode}. {canonical_instruction}\n" + "Valid output example using this canonical candidate:\n" + f"{json.dumps(example_payload, separators=(',', ':'), sort_keys=True)}\n" + "Now produce a different valid JSON object for the provided candidate.\n" + f"Shared context:\n{json.dumps(shared_context, sort_keys=True)}\n" + f"Canonical candidate:\n{json.dumps(canonical_candidate, sort_keys=True)}\n" + "Anchor questions to avoid:\n" + f"{anchors}\n" + ) + + +def _build_swarm_v2_generator_rows( + env: OSINTEnvironment, + cfg: SelfPlayTrainingConfig, + rng: random.Random, +) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: + rows: list[dict[str, Any]] = [] + canonical_candidates: list[dict[str, Any]] = [] + existing_questions = [task.question for task in env.tasks] + path_candidates = build_swarm_v2_path_candidates( + env.graph, + rng=rng, + count=max(1, cfg.generator_prompts_per_round), + min_hops=2, + max_hops=cfg.swarm_v2.validation.max_path_hops, + ) + for idx, path_edges in enumerate(path_candidates): + shared_context = _graph_context_for_prompt( + env=env, + max_nodes=cfg.swarm_v2.shared_context.max_nodes, + max_edges=cfg.swarm_v2.shared_context.max_edges, + rng=rng, + ) + canonical_candidate = build_swarm_v2_canonical_subgraph( + env.graph, + path_edges=path_edges, + 
max_extra_edges=max(0, cfg.swarm_v2.shared_context.max_edges - len(path_edges)), + ) + anchor_sample_size = min(5, len(existing_questions)) + anchor_sample = rng.sample(existing_questions, k=anchor_sample_size) if anchor_sample_size > 0 else [] + prompt = _swarm_v2_generator_prompt( + graph=env.graph, + shared_context=shared_context, + canonical_candidate=canonical_candidate, + anchor_questions=anchor_sample, + swarm_cfg=cfg.swarm_v2.generator_swarm, + canonical_graph_mode=cfg.canonical_graph_mode, + ) + rows.append( + { + "prompt": prompt, + "candidate_id": f"candidate_{idx}", + "canonical_graph_json": json.dumps(canonical_candidate, sort_keys=True), + } + ) + canonical_candidates.append(canonical_candidate) + return rows, canonical_candidates + + + +def _safe_build_grpo_config( + phase: KimiGRPOPhaseConfig, + output_dir: str, + grpo_config_cls: Any, + report_to: list[str] | None = None, + run_name: str = "", +) -> Any: + kwargs: dict[str, Any] = { + "output_dir": output_dir, + "learning_rate": float(phase.learning_rate), + "max_steps": int(phase.max_steps), + "per_device_train_batch_size": int(phase.per_device_train_batch_size), + "gradient_accumulation_steps": int(phase.gradient_accumulation_steps), + "num_generations": int(phase.num_generations), + "max_completion_length": int(phase.max_completion_length), + "temperature": float(phase.temperature), + "top_p": float(phase.top_p), + "beta": float(phase.beta), + "epsilon": float(phase.epsilon), + "num_iterations": int(phase.num_iterations), + "loss_type": str(phase.loss_type), + "scale_rewards": str(phase.scale_rewards), + "logging_steps": int(phase.logging_steps), + "save_steps": int(phase.save_steps), + "remove_unused_columns": False, + "use_vllm": bool(phase.use_vllm), + "vllm_mode": str(phase.vllm_mode), + "report_to": list(report_to or []), + } + if str(run_name).strip(): + kwargs["run_name"] = str(run_name).strip() + + signature = inspect.signature(grpo_config_cls.__init__) + filtered = {key: value for key, 
value in kwargs.items() if key in signature.parameters} + return grpo_config_cls(**filtered) + + +def _build_lora_config(lora: LoraTuningConfig) -> Any: + try: + from peft import LoraConfig, TaskType + except ImportError as exc: + raise RuntimeError( + "LoRA tuning selected, but PEFT is not installed. " + "Install train dependencies first: python -m pip install -e .[train]" + ) from exc + + task_type_token = str(lora.task_type or "CAUSAL_LM").strip().upper() + task_type = getattr(TaskType, task_type_token, TaskType.CAUSAL_LM) + return LoraConfig( + r=max(1, int(lora.r)), + lora_alpha=max(1, int(lora.alpha)), + lora_dropout=float(lora.dropout), + target_modules=list(lora.target_modules), + bias=str(lora.bias), + task_type=task_type, + ) + + +def _coerce_named_reward_func(reward_function: Any) -> Any: + """Return a callable with a stable __name__ for TRL compatibility.""" + if hasattr(reward_function, "__name__") and str(getattr(reward_function, "__name__", "")).strip(): + return reward_function + + # TRL versions that introspect reward_funcs[i].__name__ require this attribute. 
+ if callable(reward_function): + name = reward_function.__class__.__name__ or "reward_func" + try: + setattr(reward_function, "__name__", name) + return reward_function + except Exception: + def _wrapped_reward(*args: Any, **kwargs: Any) -> Any: + return reward_function(*args, **kwargs) + + _wrapped_reward.__name__ = name + return _wrapped_reward + return reward_function + + + +def _train_grpo_phase( + model_name_or_path: str, + phase: KimiGRPOPhaseConfig, + rows: list[dict[str, Any]], + reward_function: Any, + output_dir: Path, + tuning_mode: str, + lora: LoraTuningConfig, + report_to: list[str] | None = None, + run_name: str = "", +) -> dict[str, Any]: + Dataset, GRPOConfig, GRPOTrainer = _require_training_stack() + + output_dir.mkdir(parents=True, exist_ok=True) + dataset = Dataset.from_list(rows) + args = _safe_build_grpo_config( + phase=phase, + output_dir=str(output_dir), + grpo_config_cls=GRPOConfig, + report_to=report_to, + run_name=run_name, + ) + + trainer_kwargs: dict[str, Any] = { + "model": model_name_or_path, + "args": args, + "reward_funcs": _coerce_named_reward_func(reward_function), + "train_dataset": dataset, + } + + if str(tuning_mode).strip().lower() == "lora": + trainer_signature = inspect.signature(GRPOTrainer.__init__) + if "peft_config" not in trainer_signature.parameters: + raise RuntimeError("Installed TRL version does not expose peft_config in GRPOTrainer.") + trainer_kwargs["peft_config"] = _build_lora_config(lora) + + phase_label = str(run_name).strip() or str(output_dir.name) + print(f"[self_play] Starting phase: {phase_label} rows={len(rows)} max_steps={phase.max_steps}") + strict_asserts = str(os.getenv("OSINT_TRAIN_STRICT_ASSERTS", "")).strip().lower() in {"1", "true", "yes", "on"} + trainer = GRPOTrainer(**trainer_kwargs) + tracked_params = [ + (name, param) + for name, param in trainer.model.named_parameters() + if getattr(param, "requires_grad", False) + ][:32] + pre_update_fingerprint = { + name: 
float(param.detach().float().abs().mean().item()) + for name, param in tracked_params + } + train_output = trainer.train() + + final_dir = output_dir / "final_model" + trainer.save_model(str(final_dir)) + + global_step = int(getattr(train_output, "global_step", 0)) + training_loss = float(getattr(train_output, "training_loss", 0.0)) + + result = { + "model_path": str(final_dir), + "global_step": global_step, + "training_loss": training_loss, + "train_rows": len(rows), + "tuning_mode": str(tuning_mode).strip().lower() or "full", + } + + log_history = list(getattr(getattr(trainer, "state", None), "log_history", []) or []) + reward_values = [float(row.get("reward")) for row in log_history if isinstance(row, dict) and "reward" in row] + reward_std_values = [ + float(row.get("reward_std")) + for row in log_history + if isinstance(row, dict) and "reward_std" in row + ] + kl_values = [float(row.get("kl")) for row in log_history if isinstance(row, dict) and "kl" in row] + grad_norm_values = [ + float(row.get("grad_norm")) + for row in log_history + if isinstance(row, dict) and "grad_norm" in row + ] + loss_values = [float(row.get("loss")) for row in log_history if isinstance(row, dict) and "loss" in row] + entropy_values = [float(row.get("entropy")) for row in log_history if isinstance(row, dict) and "entropy" in row] + + trainable_params = [param for param in trainer.model.parameters() if getattr(param, "requires_grad", False)] + grad_tensors = [param.grad for param in trainable_params if getattr(param, "grad", None) is not None] + trainable_param_count = int(sum(param.numel() for param in trainable_params)) + params_with_grad = int(len(grad_tensors)) + nonzero_grad_tensors = int( + sum( + 1 + for grad in grad_tensors + if float(grad.detach().abs().sum().item()) > 0.0 + ) + ) + + diagnostics = { + "reward_min": min(reward_values) if reward_values else 0.0, + "reward_max": max(reward_values) if reward_values else 0.0, + "reward_std_max": max(reward_std_values) if 
reward_std_values else 0.0, + "kl_max": max(kl_values) if kl_values else 0.0, + "loss_abs_max": max((abs(value) for value in loss_values), default=0.0), + "grad_norm_max": max(grad_norm_values) if grad_norm_values else 0.0, + "entropy_min": min(entropy_values) if entropy_values else 0.0, + "entropy_max": max(entropy_values) if entropy_values else 0.0, + "trainable_param_count": trainable_param_count, + "params_with_grad": params_with_grad, + "nonzero_grad_tensors": nonzero_grad_tensors, + "fingerprint_param_count": len(pre_update_fingerprint), + "fingerprint_changed_count": 0, + } + if pre_update_fingerprint: + changed_count = 0 + for name, param in tracked_params: + after_value = float(param.detach().float().abs().mean().item()) + before_value = pre_update_fingerprint.get(name, after_value) + if abs(after_value - before_value) > 1e-9: + changed_count += 1 + diagnostics["fingerprint_changed_count"] = changed_count + result["diagnostics"] = diagnostics + + print( + "[self_play][diagnostics] " + f"{phase_label} reward_range=({diagnostics['reward_min']:.4f},{diagnostics['reward_max']:.4f}) " + f"reward_std_max={diagnostics['reward_std_max']:.6f} " + f"kl_max={diagnostics['kl_max']:.6f} " + f"loss_abs_max={diagnostics['loss_abs_max']:.6f} " + f"grad_norm_max={diagnostics['grad_norm_max']:.6f} " + f"nonzero_grad_tensors={diagnostics['nonzero_grad_tensors']}/{max(1, diagnostics['params_with_grad'])} " + f"fingerprint_changed={diagnostics['fingerprint_changed_count']}/{max(1, diagnostics['fingerprint_param_count'])}" + ) + + if strict_asserts: + assert diagnostics["reward_max"] != diagnostics["reward_min"], ( + f"Constant reward detected in {phase_label}: {diagnostics['reward_min']}" + ) + assert diagnostics["reward_std_max"] > 0.0, f"reward_std stayed zero in {phase_label}" + assert diagnostics["kl_max"] > 0.0, f"KL stayed zero in {phase_label}" + assert diagnostics["loss_abs_max"] > 0.0, f"Loss stayed zero in {phase_label}" + assert diagnostics["grad_norm_max"] > 0.0, 
def _resolve_reporting(training_config: SelfPlayTrainingConfig, phase_name: str, round_index: int) -> tuple[list[str], str]:
    """Resolve TRL report targets and the W&B run name for one training phase.

    Returns ``([], "")`` when W&B logging is disabled. Otherwise exports the
    configured project/entity through environment variables (wandb reads them
    at init time) and returns ``(["wandb"], run_name)``.
    """
    if not training_config.wandb_enabled:
        return [], ""

    # Only set env vars for values that are actually configured.
    for env_key, value in (
        ("WANDB_PROJECT", training_config.wandb_project),
        ("WANDB_ENTITY", training_config.wandb_entity),
    ):
        if value:
            os.environ[env_key] = str(value)

    prefix = str(training_config.wandb_run_name_prefix).strip() or "self-play"
    return ["wandb"], f"{prefix}-r{round_index:03d}-{phase_name}"


def _resolve_initial_models(cfg: SelfPlayTrainingConfig) -> tuple[str, str]:
    """Return the starting ``(generator, answerer)`` model identifiers.

    In "shared" topology both roles start from one checkpoint, falling back to
    the answerer's (then the generator's) configured model when no shared path
    is set; otherwise each phase keeps its own configured model.
    """
    if str(cfg.model_topology).strip().lower() != "shared":
        return str(cfg.generator_phase.model_name_or_path), str(cfg.answerer_phase.model_name_or_path)

    shared = str(cfg.shared_model_name_or_path).strip()
    if not shared:
        shared = (
            str(cfg.answerer_phase.model_name_or_path).strip()
            or str(cfg.generator_phase.model_name_or_path).strip()
        )
    return shared, shared
dict(task.metadata or {}) + metadata.update( + { + "generated_by": "fallback_generator", + "difficulty": "hard", + "round": round_index, + "scenario": "adversarial_trace", + "grader": { + "type": "difficulty_exact_match", + "answer_type": "node_id", + "case_sensitive": True, + "reward_profile": "hard", + }, + } + ) + out.append( + TaskInstance( + task_id=f"adv_r{round_index}_{idx}", + task_type="adversarial_trace", + question=f"[Adversarial] {task.question}", + answer=task.answer, + supporting_edges=list(task.supporting_edges), + metadata=metadata, + ) + ) + return out + + + +def _sample_generated_tasks_with_model( + model_name_or_path: str, + prompts: list[str], + round_index: int, + count: int, + max_support_edges: int, +) -> list[TaskInstance]: + from transformers import AutoModelForCausalLM, AutoTokenizer + + if count <= 0: + return [] + + tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) + if tokenizer.pad_token is None and tokenizer.eos_token is not None: + tokenizer.pad_token = tokenizer.eos_token + model = AutoModelForCausalLM.from_pretrained(model_name_or_path) + model.eval() + + import torch + + device = next(model.parameters()).device + generated: list[TaskInstance] = [] + + for prompt in prompts: + if len(generated) >= count: + break + encoded = tokenizer(prompt, return_tensors="pt") + encoded = {k: v.to(device) for k, v in encoded.items()} + + with torch.no_grad(): + output = model.generate( + **encoded, + max_new_tokens=256, + do_sample=True, + top_p=0.95, + temperature=1.0, + num_return_sequences=1, + pad_token_id=tokenizer.eos_token_id, + ) + + completion_ids = output[0][encoded["input_ids"].shape[1] :] + completion = tokenizer.decode(completion_ids, skip_special_tokens=True) + candidate = parse_generated_task_completion(completion, max_support_edges=max_support_edges) + if not candidate.is_valid: + continue + + metadata = { + "generated_by": "generator_model", + "round": round_index, + "difficulty": "hard", + "scenario": 
"adversarial_trace", + "grader": { + "type": "difficulty_exact_match", + "answer_type": "node_id", + "case_sensitive": True, + "reward_profile": "hard", + }, + } + generated.append( + TaskInstance( + task_id=f"adv_r{round_index}_{len(generated)}", + task_type=candidate.task_type, + question=candidate.question, + answer=candidate.answer, + supporting_edges=list(candidate.supporting_edges), + metadata=metadata, + ) + ) + + return generated + + + +def _select_answerer_tasks( + seed_tasks: list[TaskInstance], + generated_tasks: list[TaskInstance], + cfg: SelfPlayTrainingConfig, + rng: random.Random, +) -> list[TaskInstance]: + seed_pick = list(seed_tasks) + gen_pick = list(generated_tasks) + rng.shuffle(seed_pick) + rng.shuffle(gen_pick) + + chosen = seed_pick[: max(1, cfg.seed_tasks_per_round)] + chosen.extend(gen_pick[: max(1, cfg.generated_tasks_per_round)]) + return chosen + + + +def _save_rows(path: Path, rows: list[dict[str, Any]]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(rows, indent=2, sort_keys=True), encoding="utf-8") + + + +def _save_tasks(path: Path, tasks: list[TaskInstance]) -> None: + payload = [] + for task in tasks: + payload.append( + { + "task_id": task.task_id, + "task_type": task.task_type, + "question": task.question, + "answer": task.answer, + "supporting_edges": [ + { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + for edge in task.supporting_edges + ], + "metadata": dict(task.metadata or {}), + } + ) + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") + + +def _save_payload(path: Path, payload: Any) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") + + +def _fallback_swarm_v2_completion_texts( + env: OSINTEnvironment, + cfg: SelfPlayTrainingConfig, + round_index: int, + rng: 
def _sample_swarm_v2_completion_texts_with_model(
    env: OSINTEnvironment,
    cfg: SelfPlayTrainingConfig,
    model_name_or_path: str,
    prompts: list[str],
    count: int,
    seen_questions: list[str],
) -> list[str]:
    """Sample up to ``count`` swarm-v2 task completions from a causal LM.

    For each prompt, up to three decode attempts are made at progressively
    lower sampling temperatures; the first completion that passes replay
    validation wins, otherwise the highest-scoring invalid attempt is kept as
    a best-effort candidate. Returns the raw completion texts (one per
    consumed prompt, at most ``count``).
    """
    # Imported lazily so the module stays importable without train extras.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    if count <= 0:
        return []

    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    if tokenizer.pad_token is None and tokenizer.eos_token is not None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
    model.eval()

    import torch

    device = next(model.parameters()).device
    completions: list[str] = []
    validator = SwarmV2ReplayValidator(
        graph=env.graph,
        validation=cfg.swarm_v2.validation,
        shared_context=cfg.swarm_v2.shared_context,
        seen_questions=seen_questions,
    )
    for prompt in prompts:
        if len(completions) >= count:
            break
        encoded = tokenizer(prompt, return_tensors="pt")
        encoded = {key: value.to(device) for key, value in encoded.items()}

        best_completion = ""
        best_score = -999
        # Retry ladder: cooler sampling on each attempt to coax valid JSON.
        for attempt_idx, (temperature, top_p) in enumerate([(0.7, 0.9), (0.5, 0.85), (0.3, 0.8)]):
            with torch.no_grad():
                output = model.generate(
                    **encoded,
                    max_new_tokens=max(256, int(cfg.generator_phase.max_completion_length)),
                    do_sample=True,
                    top_p=top_p,
                    temperature=temperature,
                    num_return_sequences=1,
                    pad_token_id=tokenizer.eos_token_id,
                )

            # Strip the prompt tokens so only the generated tail is decoded.
            completion_ids = output[0][encoded["input_ids"].shape[1] :]
            completion = tokenizer.decode(completion_ids, skip_special_tokens=True)
            candidate = parse_generated_task_completion(
                completion,
                max_support_edges=cfg.swarm_v2.validation.max_support_edges,
            )
            validation = validator.validate(candidate)
            score = int(bool(candidate.question)) + int(bool(candidate.answer)) + len(candidate.supporting_edges)
            if validation.is_valid:
                print(f"[self_play][generation_retry] valid_completion attempt={attempt_idx + 1}")
                # Record the accepted question so later prompts in this pass
                # cannot emit a duplicate and still count as "valid"
                # (mirrors _materialize_swarm_v2_completions).
                validator.remember(candidate.question)
                best_completion = completion
                break
            if score > best_score:
                best_score = score
                best_completion = completion

        completions.append(best_completion)
    return completions
len(candidate.supporting_edges)), + ) + + canonical_graph_candidates.append( + { + "candidate_index": completion_idx, + "canonical_graph": canonical_graph, + "question": candidate.question, + "answer": candidate.answer, + } + ) + replay_traces.append( + { + "candidate_index": completion_idx, + "question": candidate.question, + "tool_trace": [ + { + "tool_name": call.tool_name, + "args": dict(call.args), + "output": dict(call.output), + } + for call in candidate.tool_trace + ], + "replayed_edges": validation.to_dict()["replayed_edges"], + } + ) + validation_reports.append( + { + "candidate_index": completion_idx, + "question": candidate.question, + "answer": candidate.answer, + "task_type": candidate.task_type, + "validation": validation.to_dict(), + "raw_completion": completion_text, + } + ) + + if not validation.is_valid: + continue + if len(tasks) >= max(1, cfg.generated_tasks_per_round): + continue + + metadata = { + "generated_by": "swarm_v2_generator", + "round": round_index, + "difficulty": "hard", + "scenario": "swarm_v2_trace", + "canonical_graph": canonical_graph, + "tool_trace": [ + { + "tool_name": call.tool_name, + "args": dict(call.args), + "output": dict(call.output), + } + for call in candidate.tool_trace + ], + "subagent_outputs": list(candidate.subagent_outputs), + "validation": validation.to_dict(), + "shared_context_budget": { + "max_nodes": cfg.swarm_v2.shared_context.max_nodes, + "max_edges": cfg.swarm_v2.shared_context.max_edges, + "target_pressure": cfg.swarm_v2.shared_context.target_pressure, + }, + "grader": { + "type": "difficulty_exact_match", + "answer_type": "node_id", + "case_sensitive": True, + "reward_profile": "hard", + }, + } + tasks.append( + TaskInstance( + task_id=f"swarm_v2_r{round_index}_{len(tasks)}", + task_type=candidate.task_type or "swarm_v2_trace", + question=candidate.question, + answer=candidate.answer, + supporting_edges=list(validation.replayed_edges or candidate.supporting_edges), + metadata=metadata, + ) + ) + 
validator.remember(candidate.question) + + return tasks, validation_reports, canonical_graph_candidates, replay_traces + + +def _run_adversarial_self_play_swarm_v2( + env_config: EnvironmentConfig, + training_config: SelfPlayTrainingConfig, + dry_run: bool = False, +) -> dict[str, Any]: + effective_dry_run = bool(dry_run or training_config.dry_run) + topology = str(training_config.model_topology).strip().lower() or "dual" + phase_schedule = str(training_config.phase_schedule).strip().lower() or "generator_answerer" + tuning_mode = str(training_config.tuning_mode).strip().lower() or "full" + + run_dir = Path(training_config.output_dir) + run_dir.mkdir(parents=True, exist_ok=True) + + env = OSINTEnvironment(env_config, llm=build_llm_client(env_config.llm)) + seed_tasks = list(env.tasks) + seed_questions = [task.question for task in seed_tasks] + generator_model, answerer_model = _resolve_initial_models(training_config) + rng = random.Random(env_config.seed) + + bootstrap_completions = _fallback_swarm_v2_completion_texts( + env=env, + cfg=training_config, + round_index=0, + rng=rng, + ) + rolling_generated_tasks, _, _, _ = _materialize_swarm_v2_completions( + env=env, + cfg=training_config, + completion_texts=bootstrap_completions, + round_index=0, + seen_questions=seed_questions, + ) + if not rolling_generated_tasks: + rolling_generated_tasks = list(seed_tasks[: max(1, training_config.generated_tasks_per_round)]) + + rounds_payload: list[dict[str, Any]] = [] + + for round_index in range(1, max(1, training_config.rounds) + 1): + round_dir = run_dir / f"round_{round_index:03d}" + round_dir.mkdir(parents=True, exist_ok=True) + + answerer_pre_tasks: list[TaskInstance] = [] + answerer_pre_dataset_path: Path | None = None + answerer_pre_train_result: dict[str, Any] | None = None + + if phase_schedule == "answerer_generator_answerer": + answerer_pre_tasks = _select_answerer_tasks( + seed_tasks=seed_tasks, + generated_tasks=rolling_generated_tasks, + cfg=training_config, + 
rng=rng, + ) + answerer_pre_rows = _build_swarm_v2_answerer_rows(env, answerer_pre_tasks, training_config) + answerer_pre_dataset_path = round_dir / "answerer_pre_dataset.json" + _save_rows(answerer_pre_dataset_path, answerer_pre_rows) + + answerer_pre_train_result = { + "model_path": answerer_model, + "global_step": 0, + "training_loss": 0.0, + "train_rows": len(answerer_pre_rows), + "skipped": effective_dry_run, + "tuning_mode": tuning_mode, + } + + if not effective_dry_run: + answerer_pre_report_to, answerer_pre_run_name = _resolve_reporting( + training_config=training_config, + phase_name="answerer-pre", + round_index=round_index, + ) + answerer_pre_reward = AnswererRewardFunction( + graph=env.graph, + pipeline_mode="swarm_v2", + parl_max_parallel_hint=training_config.swarm_v2.answerer_swarm.max_agents, + ) + answerer_pre_train_result = _train_grpo_phase( + model_name_or_path=answerer_model, + phase=training_config.answerer_phase, + rows=answerer_pre_rows, + reward_function=answerer_pre_reward, + output_dir=round_dir / f"{training_config.answerer_phase.output_subdir}_pre", + tuning_mode=tuning_mode, + lora=training_config.lora, + report_to=answerer_pre_report_to, + run_name=answerer_pre_run_name, + ) + answerer_model = str(answerer_pre_train_result["model_path"]) + if topology == "shared": + generator_model = answerer_model + + generator_rows, prompt_canonical_candidates = _build_swarm_v2_generator_rows(env, training_config, rng) + generator_dataset_path = round_dir / "generator_dataset.json" + _save_rows(generator_dataset_path, generator_rows) + + generator_train_result: dict[str, Any] = { + "model_path": generator_model, + "global_step": 0, + "training_loss": 0.0, + "train_rows": len(generator_rows), + "skipped": effective_dry_run, + "tuning_mode": tuning_mode, + "frozen_answerer_model": answerer_model, + } + + if not effective_dry_run: + generator_report_to, generator_run_name = _resolve_reporting( + training_config=training_config, + phase_name="generator", 
+ round_index=round_index, + ) + generator_reward = GeneratorRewardFunction( + graph=env.graph, + answerer_judge=AnswererJudge( + model_name_or_path=answerer_model, + max_new_tokens=training_config.answerer_judge_max_new_tokens, + ), + weights=training_config.generator_reward_weights, + max_support_edges=training_config.swarm_v2.validation.max_support_edges, + pipeline_mode="swarm_v2", + swarm_v2_validation=training_config.swarm_v2.validation, + swarm_v2_shared_context=training_config.swarm_v2.shared_context, + parl_max_parallel_hint=training_config.swarm_v2.generator_swarm.max_agents, + ) + generator_train_result = _train_grpo_phase( + model_name_or_path=generator_model, + phase=training_config.generator_phase, + rows=generator_rows, + reward_function=generator_reward, + output_dir=round_dir / training_config.generator_phase.output_subdir, + tuning_mode=tuning_mode, + lora=training_config.lora, + report_to=generator_report_to, + run_name=generator_run_name, + ) + generator_model = str(generator_train_result["model_path"]) + if topology == "shared": + answerer_model = generator_model + + if effective_dry_run: + completion_texts = _fallback_swarm_v2_completion_texts( + env=env, + cfg=training_config, + round_index=round_index, + rng=rng, + ) + else: + completion_texts = _sample_swarm_v2_completion_texts_with_model( + env=env, + cfg=training_config, + model_name_or_path=generator_model, + prompts=[row["prompt"] for row in generator_rows], + count=max(1, training_config.generated_tasks_per_round * 2), + seen_questions=seed_questions + [task.question for task in rolling_generated_tasks], + ) + if not completion_texts: + completion_texts = _fallback_swarm_v2_completion_texts( + env=env, + cfg=training_config, + round_index=round_index, + rng=rng, + ) + + generated_tasks, validation_reports, canonical_graph_candidates, replay_traces = _materialize_swarm_v2_completions( + env=env, + cfg=training_config, + completion_texts=completion_texts, + round_index=round_index, + 
seen_questions=seed_questions + [task.question for task in rolling_generated_tasks], + prompt_canonical_candidates=prompt_canonical_candidates, + ) + if not generated_tasks: + fallback_completions = _fallback_swarm_v2_completion_texts( + env=env, + cfg=training_config, + round_index=round_index, + rng=rng, + ) + generated_tasks, validation_reports, canonical_graph_candidates, replay_traces = _materialize_swarm_v2_completions( + env=env, + cfg=training_config, + completion_texts=fallback_completions, + round_index=round_index, + seen_questions=seed_questions + [task.question for task in rolling_generated_tasks], + prompt_canonical_candidates=None, + ) + + if generated_tasks: + rolling_generated_tasks = list(generated_tasks) + + canonical_graph_candidates_path = round_dir / "canonical_graph_candidates.json" + replay_traces_path = round_dir / "replay_traces.json" + validation_reports_path = round_dir / "validation_reports.json" + generated_tasks_path = round_dir / "generated_tasks.json" + _save_payload(canonical_graph_candidates_path, prompt_canonical_candidates or canonical_graph_candidates) + _save_payload(replay_traces_path, replay_traces) + _save_payload(validation_reports_path, validation_reports) + _save_tasks(generated_tasks_path, generated_tasks) + + answerer_tasks = _select_answerer_tasks( + seed_tasks=seed_tasks, + generated_tasks=generated_tasks, + cfg=training_config, + rng=rng, + ) + answerer_rows = _build_swarm_v2_answerer_rows(env, answerer_tasks, training_config) + answerer_dataset_path = round_dir / "answerer_dataset.json" + _save_rows(answerer_dataset_path, answerer_rows) + + answerer_train_result: dict[str, Any] = { + "model_path": answerer_model, + "global_step": 0, + "training_loss": 0.0, + "train_rows": len(answerer_rows), + "skipped": effective_dry_run, + "tuning_mode": tuning_mode, + } + + if not effective_dry_run: + answerer_report_to, answerer_run_name = _resolve_reporting( + training_config=training_config, + phase_name="answerer", + 
round_index=round_index, + ) + answerer_reward = AnswererRewardFunction( + graph=env.graph, + pipeline_mode="swarm_v2", + parl_max_parallel_hint=training_config.swarm_v2.answerer_swarm.max_agents, + ) + answerer_train_result = _train_grpo_phase( + model_name_or_path=answerer_model, + phase=training_config.answerer_phase, + rows=answerer_rows, + reward_function=answerer_reward, + output_dir=round_dir / training_config.answerer_phase.output_subdir, + tuning_mode=tuning_mode, + lora=training_config.lora, + report_to=answerer_report_to, + run_name=answerer_run_name, + ) + answerer_model = str(answerer_train_result["model_path"]) + if topology == "shared": + generator_model = answerer_model + + rounds_payload.append( + { + "round": round_index, + "dry_run": effective_dry_run, + "pipeline_mode": "swarm_v2", + "phase_schedule": phase_schedule, + "generator": generator_train_result, + "answerer": answerer_train_result, + "answerer_pre": answerer_pre_train_result, + "generated_task_count": len(generated_tasks), + "answerer_task_count": len(answerer_tasks), + "answerer_pre_task_count": len(answerer_pre_tasks), + "artifacts": { + "generator_dataset": str(generator_dataset_path), + "answerer_dataset": str(answerer_dataset_path), + "generated_tasks": str(generated_tasks_path), + "canonical_graph_candidates": str(canonical_graph_candidates_path), + "replay_traces": str(replay_traces_path), + "validation_reports": str(validation_reports_path), + "answerer_pre_dataset": str(answerer_pre_dataset_path) if answerer_pre_dataset_path else "", + }, + } + ) + + final_payload = { + "dry_run": effective_dry_run, + "pipeline_mode": "swarm_v2", + "output_dir": str(run_dir), + "model_topology": topology, + "phase_schedule": phase_schedule, + "tuning_mode": tuning_mode, + "canonical_graph_mode": str(training_config.canonical_graph_mode).strip().lower() or "generate", + "rounds": rounds_payload, + "final_models": { + "generator": generator_model, + "answerer": answerer_model, + }, + 
"kimi_objective_mapping": { + "grouped_rollouts": "TRL GRPO num_generations", + "mean_centered_advantage": "GRPO relative reward baseline", + "token_level_clipping": "GRPO epsilon clipping over policy ratios", + "reference_regularization": "GRPO beta KL term", + "toggle_self_play": "Alternating generator and answerer rounds", + "parallel_orchestration": "PARL-inspired auxiliary reward over generator and answerer swarms", + }, + } + + summary_path = run_dir / "self_play_summary.json" + summary_path.write_text(json.dumps(final_payload, indent=2, sort_keys=True), encoding="utf-8") + final_payload["summary_path"] = str(summary_path) + return final_payload + + + +def run_adversarial_self_play( + env_config: EnvironmentConfig, + training_config: SelfPlayTrainingConfig, + dry_run: bool = False, +) -> dict[str, Any]: + if str(training_config.pipeline_mode).strip().lower() == "swarm_v2": + return _run_adversarial_self_play_swarm_v2( + env_config=env_config, + training_config=training_config, + dry_run=dry_run, + ) + + effective_dry_run = bool(dry_run or training_config.dry_run) + topology = str(training_config.model_topology).strip().lower() or "dual" + phase_schedule = str(training_config.phase_schedule).strip().lower() or "generator_answerer" + tuning_mode = str(training_config.tuning_mode).strip().lower() or "full" + + run_dir = Path(training_config.output_dir) + run_dir.mkdir(parents=True, exist_ok=True) + + env = OSINTEnvironment(env_config, llm=build_llm_client(env_config.llm)) + seed_tasks = list(env.tasks) + + generator_model, answerer_model = _resolve_initial_models(training_config) + + rng = random.Random(env_config.seed) + rounds_payload: list[dict[str, Any]] = [] + rolling_generated_tasks = _fallback_generated_tasks( + base_tasks=seed_tasks, + round_index=0, + count=training_config.generated_tasks_per_round, + rng=rng, + ) + if not rolling_generated_tasks: + rolling_generated_tasks = list(seed_tasks[: max(1, training_config.generated_tasks_per_round)]) + + for 
round_index in range(1, max(1, training_config.rounds) + 1): + round_dir = run_dir / f"round_{round_index:03d}" + round_dir.mkdir(parents=True, exist_ok=True) + + answerer_pre_tasks: list[TaskInstance] = [] + answerer_pre_dataset_path: Path | None = None + answerer_pre_train_result: dict[str, Any] | None = None + + if phase_schedule == "answerer_generator_answerer": + answerer_pre_tasks = _select_answerer_tasks( + seed_tasks=seed_tasks, + generated_tasks=rolling_generated_tasks, + cfg=training_config, + rng=rng, + ) + answerer_pre_rows = _build_answerer_rows(answerer_pre_tasks) + answerer_pre_dataset_path = round_dir / "answerer_pre_dataset.json" + _save_rows(answerer_pre_dataset_path, answerer_pre_rows) + + answerer_pre_train_result = { + "model_path": answerer_model, + "global_step": 0, + "training_loss": 0.0, + "train_rows": len(answerer_pre_rows), + "skipped": effective_dry_run, + "tuning_mode": tuning_mode, + } + + if not effective_dry_run: + answerer_pre_report_to, answerer_pre_run_name = _resolve_reporting( + training_config=training_config, + phase_name="answerer-pre", + round_index=round_index, + ) + answerer_pre_reward = AnswererRewardFunction(graph=env.graph) + answerer_pre_train_result = _train_grpo_phase( + model_name_or_path=answerer_model, + phase=training_config.answerer_phase, + rows=answerer_pre_rows, + reward_function=answerer_pre_reward, + output_dir=round_dir / f"{training_config.answerer_phase.output_subdir}_pre", + tuning_mode=tuning_mode, + lora=training_config.lora, + report_to=answerer_pre_report_to, + run_name=answerer_pre_run_name, + ) + answerer_model = str(answerer_pre_train_result["model_path"]) + if topology == "shared": + generator_model = answerer_model + + generator_rows = _build_generator_rows(env=env, cfg=training_config, rng=rng) + generator_dataset_path = round_dir / "generator_dataset.json" + _save_rows(generator_dataset_path, generator_rows) + + generator_train_result: dict[str, Any] = { + "model_path": generator_model, + 
"global_step": 0, + "training_loss": 0.0, + "train_rows": len(generator_rows), + "skipped": effective_dry_run, + "tuning_mode": tuning_mode, + } + + if not effective_dry_run: + generator_report_to, generator_run_name = _resolve_reporting( + training_config=training_config, + phase_name="generator", + round_index=round_index, + ) + generator_reward = GeneratorRewardFunction( + graph=env.graph, + answerer_judge=AnswererJudge( + model_name_or_path=answerer_model, + max_new_tokens=training_config.answerer_judge_max_new_tokens, + ), + weights=training_config.generator_reward_weights, + max_support_edges=training_config.max_support_edges, + ) + generator_train_result = _train_grpo_phase( + model_name_or_path=generator_model, + phase=training_config.generator_phase, + rows=generator_rows, + reward_function=generator_reward, + output_dir=round_dir / training_config.generator_phase.output_subdir, + tuning_mode=tuning_mode, + lora=training_config.lora, + report_to=generator_report_to, + run_name=generator_run_name, + ) + generator_model = str(generator_train_result["model_path"]) + if topology == "shared": + answerer_model = generator_model + + generated_tasks: list[TaskInstance] + if effective_dry_run: + generated_tasks = _fallback_generated_tasks( + base_tasks=seed_tasks, + round_index=round_index, + count=training_config.generated_tasks_per_round, + rng=rng, + ) + else: + generated_tasks = _sample_generated_tasks_with_model( + model_name_or_path=generator_model, + prompts=[row["prompt"] for row in generator_rows], + round_index=round_index, + count=training_config.generated_tasks_per_round, + max_support_edges=training_config.max_support_edges, + ) + if not generated_tasks: + generated_tasks = _fallback_generated_tasks( + base_tasks=seed_tasks, + round_index=round_index, + count=training_config.generated_tasks_per_round, + rng=rng, + ) + + if generated_tasks: + rolling_generated_tasks = list(generated_tasks) + + generated_tasks_path = round_dir / "generated_tasks.json" + 
_save_tasks(generated_tasks_path, generated_tasks) + + answerer_tasks = _select_answerer_tasks( + seed_tasks=seed_tasks, + generated_tasks=generated_tasks, + cfg=training_config, + rng=rng, + ) + answerer_rows = _build_answerer_rows(answerer_tasks) + answerer_dataset_path = round_dir / "answerer_dataset.json" + _save_rows(answerer_dataset_path, answerer_rows) + + answerer_train_result: dict[str, Any] = { + "model_path": answerer_model, + "global_step": 0, + "training_loss": 0.0, + "train_rows": len(answerer_rows), + "skipped": effective_dry_run, + "tuning_mode": tuning_mode, + } + + if not effective_dry_run: + answerer_report_to, answerer_run_name = _resolve_reporting( + training_config=training_config, + phase_name="answerer", + round_index=round_index, + ) + answerer_reward = AnswererRewardFunction(graph=env.graph) + answerer_train_result = _train_grpo_phase( + model_name_or_path=answerer_model, + phase=training_config.answerer_phase, + rows=answerer_rows, + reward_function=answerer_reward, + output_dir=round_dir / training_config.answerer_phase.output_subdir, + tuning_mode=tuning_mode, + lora=training_config.lora, + report_to=answerer_report_to, + run_name=answerer_run_name, + ) + answerer_model = str(answerer_train_result["model_path"]) + if topology == "shared": + generator_model = answerer_model + + artifacts = _RoundArtifacts( + round_index=round_index, + generator_dataset_path=str(generator_dataset_path), + answerer_dataset_path=str(answerer_dataset_path), + generated_tasks_path=str(generated_tasks_path), + ) + + rounds_payload.append( + { + "round": round_index, + "dry_run": effective_dry_run, + "pipeline_mode": "legacy", + "phase_schedule": phase_schedule, + "generator": generator_train_result, + "answerer": answerer_train_result, + "answerer_pre": answerer_pre_train_result, + "generated_task_count": len(generated_tasks), + "answerer_task_count": len(answerer_tasks), + "answerer_pre_task_count": len(answerer_pre_tasks), + "artifacts": { + 
"generator_dataset": artifacts.generator_dataset_path, + "answerer_dataset": artifacts.answerer_dataset_path, + "generated_tasks": artifacts.generated_tasks_path, + "answerer_pre_dataset": str(answerer_pre_dataset_path) if answerer_pre_dataset_path else "", + }, + } + ) + + final_payload = { + "dry_run": effective_dry_run, + "pipeline_mode": "legacy", + "output_dir": str(run_dir), + "model_topology": topology, + "phase_schedule": phase_schedule, + "tuning_mode": tuning_mode, + "canonical_graph_mode": str(training_config.canonical_graph_mode).strip().lower() or "generate", + "rounds": rounds_payload, + "final_models": { + "generator": generator_model, + "answerer": answerer_model, + }, + "kimi_objective_mapping": { + "grouped_rollouts": "TRL GRPO num_generations", + "mean_centered_advantage": "GRPO relative reward baseline", + "token_level_clipping": "GRPO epsilon clipping over policy ratios", + "reference_regularization": "GRPO beta KL term", + "toggle_self_play": "Alternating generator and answerer rounds", + }, + } + + summary_path = run_dir / "self_play_summary.json" + summary_path.write_text(json.dumps(final_payload, indent=2, sort_keys=True), encoding="utf-8") + final_payload["summary_path"] = str(summary_path) + + return final_payload diff --git a/src/osint_env/validation.py b/src/osint_env/validation.py new file mode 100644 index 0000000000000000000000000000000000000000..8f3e6a230805220309252a3823f72f73820640bc --- /dev/null +++ b/src/osint_env/validation.py @@ -0,0 +1,316 @@ +from __future__ import annotations + +import json +import tempfile +from dataclasses import asdict, dataclass +from pathlib import Path +from types import SimpleNamespace +from typing import Any + +from fastapi.testclient import TestClient + +from server import app +from osint_env.baselines.openai_runner import OpenAIBaselineConfig, OpenAIBaselineRunner, build_action_tools +from osint_env.config import clone_environment_config, load_seeding_config, load_shared_config +from 
osint_env.env.environment import OSINTEnvironment +from osint_env.env.openenv_compat import Env +from osint_env.env.reward import compute_answer_reward + + +README_PATH = Path("README.md") +DOCKERFILE_PATH = Path("Dockerfile") +OPENENV_SPEC_PATH = Path("openenv.yaml") +SHARED_CONFIG_PATH = "datasets/fixed_levels/shared_config_fixed_levels.json" +SEED_FILE_PATH = "datasets/fixed_levels/seed_fixed_levels.json" + + +@dataclass(slots=True) +class ValidationResult: + name: str + passed: bool + details: dict[str, Any] + + +def _build_environment() -> OSINTEnvironment: + shared = load_shared_config(SHARED_CONFIG_PATH) + env_cfg = clone_environment_config(shared.environment) + env_cfg.seeding = load_seeding_config(SEED_FILE_PATH) + env_cfg.llm.provider = "mock" + return OSINTEnvironment(env_cfg) + + +def check_hf_space_readiness() -> ValidationResult: + text = README_PATH.read_text(encoding="utf-8") + has_sdk = "sdk: docker" in text + has_port = "app_port: 7860" in text + has_openenv_tag = "- openenv" in text + client = TestClient(app) + health = client.get("/healthz") + dashboard = client.get("/api/environment") + spec = client.get("/openenv.yaml") + passed = all( + [ + README_PATH.exists(), + DOCKERFILE_PATH.exists(), + OPENENV_SPEC_PATH.exists(), + has_sdk, + has_port, + has_openenv_tag, + health.status_code == 200, + dashboard.status_code == 200, + spec.status_code == 200, + ] + ) + return ValidationResult( + name="hf_space_readiness", + passed=passed, + details={ + "readme_exists": README_PATH.exists(), + "dockerfile_exists": DOCKERFILE_PATH.exists(), + "openenv_spec_exists": OPENENV_SPEC_PATH.exists(), + "has_sdk_docker": has_sdk, + "has_app_port": has_port, + "has_openenv_tag": has_openenv_tag, + "healthz_status": health.status_code, + "environment_status": dashboard.status_code, + "openenv_spec_status": spec.status_code, + }, + ) + + +def check_openenv_spec_compliance() -> ValidationResult: + env = _build_environment() + obs = env.reset() + client = TestClient(app) 
+ reset = client.post("/openenv/reset", json={"task_index": 0}) + step = client.post( + "/openenv/step", + json={ + "session_id": reset.json()["session_id"] if reset.status_code == 200 else "", + "action_type": "ANSWER", + "payload": {"answer": "unknown"}, + }, + ) + state = client.get(f"/openenv/state/{reset.json()['session_id']}") if reset.status_code == 200 else None + passed = all( + [ + isinstance(env, Env), + hasattr(env, "reset"), + hasattr(env, "step"), + env.name == "OSINTEnvironment", + env.state_space == "json-observation", + env.action_space == ["CALL_TOOL", "ADD_EDGE", "ANSWER"], + env.episode_max_length == env.config.max_steps, + isinstance(obs.task, dict), + "question" in obs.task, + reset.status_code == 200, + step.status_code == 200, + state is not None and state.status_code == 200, + ] + ) + return ValidationResult( + name="openenv_spec_compliance", + passed=passed, + details={ + "env_class": type(env).__name__, + "state_space": env.state_space, + "action_space": list(env.action_space), + "episode_max_length": env.episode_max_length, + "task_keys": sorted(obs.task.keys()), + "reset_status": reset.status_code, + "step_status": step.status_code, + "state_status": 0 if state is None else state.status_code, + }, + ) + + +class _FakeMessage: + def __init__(self, answer: str): + self.content = "" + self.tool_calls = [ + SimpleNamespace( + id="fake_tool_call_0", + function=SimpleNamespace(name="submit_answer", arguments=json.dumps({"answer": answer})), + ) + ] + + +class _FakeCompletion: + def __init__(self, answer: str): + self.choices = [SimpleNamespace(message=_FakeMessage(answer))] + self.usage = SimpleNamespace(prompt_tokens=0, completion_tokens=0, total_tokens=0) + self.system_fingerprint = "validation_fp" + + +class _FakeChatCompletions: + def create(self, **kwargs: Any) -> _FakeCompletion: + messages = list(kwargs.get("messages", [])) + initial_observation = {} + for message in messages: + if message.get("role") == "user": + try: + 
initial_observation = json.loads(message.get("content", "{}")) + except json.JSONDecodeError: + initial_observation = {} + break + task_id = ((initial_observation.get("task") or {}).get("task_id")) or "" + env = _build_environment() + task = next((task for task in env.tasks if task.task_id == task_id), None) + answer = task.answer if task is not None else "unknown" + return _FakeCompletion(answer) + + +class _FakeOpenAIClient: + def __init__(self) -> None: + self.chat = SimpleNamespace(completions=_FakeChatCompletions()) + + +def _run_fake_baseline_once(output_dir: Path) -> dict[str, Any]: + config = OpenAIBaselineConfig( + api_key="validation", + episodes=3, + max_steps=4, + append_leaderboard=False, + output_path=str(output_dir / "baseline.json"), + dashboard_path=str(output_dir / "baseline.html"), + leaderboard_path=str(output_dir / "leaderboard.json"), + run_name="validation_baseline", + ) + runner = OpenAIBaselineRunner.__new__(OpenAIBaselineRunner) + runner.config = config + runner.client = _FakeOpenAIClient() + runner.tools = build_action_tools() + return runner.run() + + +def check_baseline_reproducibility() -> ValidationResult: + with tempfile.TemporaryDirectory() as left_dir_name, tempfile.TemporaryDirectory() as right_dir_name: + left = _run_fake_baseline_once(Path(left_dir_name)) + right = _run_fake_baseline_once(Path(right_dir_name)) + + left_signature = { + "summary": left["summary"], + "episodes": [ + { + "task_id": episode["task_id"], + "task_answer": episode["task_answer"], + "agent_answer": episode["agent_answer"], + "success": episode["success"], + "steps": episode["steps"], + } + for episode in left["episodes"] + ], + } + right_signature = { + "summary": right["summary"], + "episodes": [ + { + "task_id": episode["task_id"], + "task_answer": episode["task_answer"], + "agent_answer": episode["agent_answer"], + "success": episode["success"], + "steps": episode["steps"], + } + for episode in right["episodes"] + ], + } + passed = left_signature == 
right_signature + return ValidationResult( + name="baseline_reproducibility", + passed=passed, + details={ + "episodes_checked": len(left_signature["episodes"]), + "left_signature": left_signature, + "right_signature": right_signature, + }, + ) + + +def check_task_and_grader_coverage() -> ValidationResult: + env = _build_environment() + tasks = env.tasks + grader_checks: list[dict[str, Any]] = [] + distinct_types = sorted({str(task.task_type) for task in tasks}) + difficulty_buckets: dict[str, Any] = {} + for idx, task in enumerate(tasks): + token = str((task.metadata or {}).get("difficulty", "")).strip().lower() + if token in {"mid", "m"}: + token = "medium" + if token in {"high", "h"}: + token = "hard" + if token not in {"easy", "medium", "hard"}: + if idx < 10: + token = "easy" + elif idx < 20: + token = "medium" + else: + token = "hard" + difficulty_buckets.setdefault(token, task) + + for difficulty in ["easy", "medium", "hard"]: + task = difficulty_buckets.get(difficulty) + if task is None: + continue + correct = compute_answer_reward( + proposed_answer=task.answer, + task=task, + pred_edges=list(task.supporting_edges), + tool_outputs=[], + step_count=1, + model=env.reward_model, + difficulty=difficulty, + ) + wrong = compute_answer_reward( + proposed_answer="unknown", + task=task, + pred_edges=[], + tool_outputs=[], + step_count=1, + model=env.reward_model, + difficulty=difficulty, + ) + grader = dict(task.metadata.get("grader", {})) if isinstance(task.metadata, dict) else {} + grader_checks.append( + { + "difficulty": difficulty, + "task_id": task.task_id, + "task_type": task.task_type, + "support_edges": len(task.supporting_edges), + "has_grader": bool(grader), + "correct_reward": correct.total, + "wrong_reward": wrong.total, + "grader_prefers_correct": correct.total > wrong.total, + } + ) + passed = ( + len(tasks) >= 3 + and len(distinct_types) >= 3 + and len(grader_checks) >= 3 + and all( + row["support_edges"] > 0 and row["grader_prefers_correct"] and 
row["has_grader"] + for row in grader_checks + ) + ) + return ValidationResult( + name="task_and_grader_coverage", + passed=passed, + details={ + "task_count": len(tasks), + "distinct_task_types": distinct_types, + "grader_checks": grader_checks, + }, + ) + + +def run_validation_suite() -> dict[str, Any]: + results = [ + check_hf_space_readiness(), + check_openenv_spec_compliance(), + check_baseline_reproducibility(), + check_task_and_grader_coverage(), + ] + passed = all(result.passed for result in results) + return { + "passed": passed, + "checks": [asdict(result) for result in results], + } diff --git a/src/osint_env/viz/__init__.py b/src/osint_env/viz/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0c98ce2d59d35418cfb871ff0eb5d9c72a3e19c6 --- /dev/null +++ b/src/osint_env/viz/__init__.py @@ -0,0 +1,3 @@ +from osint_env.viz.dashboard import export_dashboard + +__all__ = ["export_dashboard"] diff --git a/src/osint_env/viz/dashboard.py b/src/osint_env/viz/dashboard.py new file mode 100644 index 0000000000000000000000000000000000000000..3e033381d99306acdbc7a2a2cb12e7574135b1c5 --- /dev/null +++ b/src/osint_env/viz/dashboard.py @@ -0,0 +1,801 @@ +from __future__ import annotations + +import json +from pathlib import Path +from typing import Any + +from osint_env.data.generator import PlatformViews +from osint_env.domain.models import CanonicalGraph, Edge, TaskInstance +from osint_env.env.environment import OSINTEnvironment + + +def _safe_label(value: str, fallback: str) -> str: + text = str(value).strip() + return text if text else fallback + + +def _canonical_graph_payload(graph: CanonicalGraph) -> dict[str, Any]: + nodes = [] + for node in graph.nodes.values(): + attrs = node.attrs or {} + title = "\\n".join(f"{k}: {v}" for k, v in attrs.items()) + label = _safe_label(str(attrs.get("name") or attrs.get("handle") or node.node_id), node.node_id) + nodes.append( + { + "id": node.node_id, + "label": label, + "group": 
str(node.node_type.value), + "title": title, + "attrs": attrs, + } + ) + + edges = [] + for idx, edge in enumerate(graph.edges): + edges.append( + { + "id": f"c_{idx}", + "from": edge.src, + "to": edge.dst, + "label": edge.rel, + "arrows": "to", + "color": "#1f2937", + "width": 1, + "confidence": float(edge.confidence), + "status": "canonical", + } + ) + return {"nodes": nodes, "edges": edges} + + +def _edge_key(edge: Edge) -> tuple[str, str, str]: + return (edge.src, edge.rel, edge.dst) + + +def _episode_graph_payload(pred_edges: list[Edge], truth_edges: list[Edge], graph: CanonicalGraph) -> dict[str, Any]: + pred = {_edge_key(e): e for e in pred_edges} + truth = {_edge_key(e): e for e in truth_edges} + + all_nodes = set() + all_keys = set(pred) | set(truth) + for src, _, dst in all_keys: + all_nodes.add(src) + all_nodes.add(dst) + + nodes = [] + for node_id in sorted(all_nodes): + node = graph.nodes.get(node_id) + if node is None: + nodes.append({"id": node_id, "label": node_id, "group": "episode", "attrs": {}}) + continue + attrs = node.attrs or {} + label = _safe_label(str(attrs.get("name") or attrs.get("handle") or node_id), node_id) + nodes.append({"id": node_id, "label": label, "group": str(node.node_type.value), "attrs": attrs}) + + edges = [] + for idx, key in enumerate(sorted(all_keys)): + src, rel, dst = key + in_pred = key in pred + in_truth = key in truth + if in_pred and in_truth: + color = "#16a34a" + dashes = False + status = "matched" + elif in_pred: + color = "#2563eb" + dashes = False + status = "pred_only" + else: + color = "#f59e0b" + dashes = True + status = "truth_only" + edges.append( + { + "id": f"e_{idx}", + "from": src, + "to": dst, + "label": rel, + "arrows": "to", + "color": color, + "dashes": dashes, + "width": 2, + "status": status, + "confidence": float((pred.get(key) or truth.get(key) or Edge(src, rel, dst)).confidence), + } + ) + + return {"nodes": nodes, "edges": edges} + + +def _views_payload(views: PlatformViews) -> dict[str, 
Any]: + return { + "microblog_posts": views.microblog_posts, + "forum_threads": views.forum_threads, + "profiles": views.profiles, + } + + +def _leaderboard_payload(records: list[dict[str, Any]]) -> list[dict[str, Any]]: + ranked = sorted(records, key=lambda r: float(r.get("metrics", {}).get("leaderboard_score", 0.0)), reverse=True) + return ranked[:200] + + +def export_dashboard( + env: OSINTEnvironment, + evaluation: dict[str, Any], + leaderboard_records: list[dict[str, Any]], + output_path: str, +) -> str: + summary = evaluation.get("summary", evaluation) + episodes = evaluation.get("episodes", []) + + task: TaskInstance | None = env.state.task if env.state else None + truth_edges = task.supporting_edges if task else [] + pred_edges = env.memory_graph.edges if env.state else [] + + episode_graphs: list[dict[str, Any]] = [] + for episode in episodes: + pred_from_eval = [Edge(str(e.get("src", "")), str(e.get("rel", "")), str(e.get("dst", "")), float(e.get("confidence", 1.0))) for e in episode.get("pred_edges", []) if isinstance(e, dict)] + truth_from_eval = [Edge(str(e.get("src", "")), str(e.get("rel", "")), str(e.get("dst", "")), float(e.get("confidence", 1.0))) for e in episode.get("truth_edges", []) if isinstance(e, dict)] + if pred_from_eval or truth_from_eval: + episode_graphs.append(_episode_graph_payload(pred_from_eval, truth_from_eval, env.graph)) + + if not episode_graphs: + episode_graphs.append(_episode_graph_payload(pred_edges, truth_edges, env.graph)) + + payload = { + "summary": summary, + "episodes": episodes, + "leaderboard": _leaderboard_payload(leaderboard_records), + "canonical_graph": _canonical_graph_payload(env.graph), + "episode_graphs": episode_graphs, + "episode_graph": episode_graphs[-1], + "views": _views_payload(env.views), + "task": { + "task_id": task.task_id if task else "n/a", + "task_type": task.task_type if task else "n/a", + "question": task.question if task else "n/a", + "answer": task.answer if task else "n/a", + }, + } + + 
html = f""" + + + + + OSINT Environment Dashboard + + + + + + + + + +
    +
    +
    +

    OSINT Benchmark Dashboard

    +

    Interactive explorer for canonical knowledge graph, episode traces, source platform records, and benchmark ranking.

    +
    +
    +
    +
    +

    Episode Explorer

    +
    + + +
    +
    + + +
    +
    Task ID:
    +
    Task Type:
    +
    Question
    +
    +
    Ground Truth Answer:
    +
    Agent Answer:
    +
    Correct:
    +
    +
    + +
    +
    +
    +

    Graph Controls

    +
    + + + + + + + +
    +
    +
    +

    Node Types

    +
    +
    +
    + +
    +

    Graph Explorer

    +
    +
    Layer: Canonical Graph
    +
    +
    +
    + matched edge + predicted only + truth only +
    +
    + +
    +
    +

    Node Inspector

    +
    Click a node to inspect attributes and neighbors.
    +
    +
    +

    Edge Inspector

    +
    Click an edge to inspect relation details.
    +
    +
    +
    + +
    +
    +

    Original Database Explorer

    +
    +
    + + +
    +
    +
    + +
    +

    Selected Source Record

    +
    Click a row in the database table to inspect full JSON.
    +
    +
    + +
    +
    +

    Benchmark Summary Radar

    +
    +
    +
    +

    Episode Reward and Graph F1

    +
    +
    +
    + +
    +

    Benchmark Leaderboard

    +
    + + +
    +
    +
    +
    + + + + +""" + + out = Path(output_path) + out.parent.mkdir(parents=True, exist_ok=True) + out.write_text(html, encoding="utf-8") + return str(out) diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..c4b4171d9ef25fa2362b246ff84caed0e31ff8fb --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +import sys +from pathlib import Path + + +ROOT = Path(__file__).resolve().parents[1] +SRC = ROOT / "src" + +if str(SRC) not in sys.path: + sys.path.insert(0, str(SRC)) + diff --git a/tests/test_cli_eval_outputs.py b/tests/test_cli_eval_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..ba83a9036f0fd5c32a193608c737baeb33cb18ef --- /dev/null +++ b/tests/test_cli_eval_outputs.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import argparse + +from osint_env import cli +from osint_env.domain.models import EnvironmentConfig + + +class _DummyParser: + def __init__(self, namespace: argparse.Namespace): + self._namespace = namespace + + def parse_args(self) -> argparse.Namespace: + return self._namespace + + +class _DummyEnv: + def __init__(self, config: EnvironmentConfig, llm=None): + self.config = config + self.llm = llm + + +def test_eval_exports_dashboard_and_evaluation(monkeypatch, tmp_path, capsys): + dashboard_path = tmp_path / "eval_dashboard.html" + eval_path = tmp_path / "latest_evaluation.json" + + args = argparse.Namespace( + cmd="eval", + episodes=1, + leaderboard="", + dashboard="", + dashboard_dir="", + evaluation="", + ) + + runtime = { + "default_episodes": 20, + "leaderboard_path": str(tmp_path / "leaderboard.json"), + "dashboard_path": str(dashboard_path), + "sweep_dashboard_dir": str(tmp_path / "sweep"), + } + + evaluation_payload = { + "summary": { + "avg_reward": 0.5, + "avg_graph_f1": 0.4, + "task_success_rate": 1.0, + "tool_efficiency": 0.7, + "avg_steps_to_solution": 3.0, + "deanonymization_accuracy": 1.0, + 
"leaderboard_score": 0.8, + }, + "episodes": [ + { + "task_id": "metaqa_1-hop_train_0", + "task_type": "metaqa_1-hop", + "question": "who directed [inception]?", + "task_answer": "christopher nolan", + "agent_answer": "christopher nolan", + "graph_f1": 1.0, + "reward": 1.0, + "steps": 2, + "tool_calls": 1, + "success": 1, + } + ], + } + + calls: dict[str, object] = {} + + monkeypatch.setattr(cli, "build_parser", lambda: _DummyParser(args)) + monkeypatch.setattr(cli, "_resolve_environment_config", lambda _args: (EnvironmentConfig(), runtime)) + monkeypatch.setattr(cli, "build_llm_client", lambda _cfg: object()) + monkeypatch.setattr(cli, "OSINTEnvironment", _DummyEnv) + monkeypatch.setattr(cli, "run_evaluation", lambda env, episodes, return_details, llm: evaluation_payload) + + def _save(path: str, payload: dict) -> None: + calls["save_path"] = path + calls["save_payload"] = payload + + def _export(env, evaluation, leaderboard_records, output_path): + calls["export_output_path"] = output_path + calls["export_eval"] = evaluation + calls["export_leaderboard"] = leaderboard_records + return output_path + + monkeypatch.setattr(cli, "_save_evaluation", _save) + monkeypatch.setattr(cli, "load_leaderboard", lambda _path: []) + monkeypatch.setattr(cli, "export_dashboard", _export) + + monkeypatch.setattr(cli, "DEFAULT_EVALUATION_PATH", str(eval_path)) + + cli.main() + + assert calls["save_path"] == str(eval_path) + assert calls["save_payload"] == evaluation_payload + assert calls["export_output_path"] == str(dashboard_path) + assert calls["export_eval"] == evaluation_payload + assert calls["export_leaderboard"] == [] + + output = capsys.readouterr().out + assert '"avg_reward": 0.5' in output + assert '"episodes"' not in output diff --git a/tests/test_config.py b/tests/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..f08353a93f0dc6721e499c0a7362e2809312ca20 --- /dev/null +++ b/tests/test_config.py @@ -0,0 +1,87 @@ +import json +from pathlib 
import Path + +from osint_env.config.shared import load_seeding_config, load_shared_config + + +def test_shared_config_defaults_when_file_missing(): + config = load_shared_config("/tmp/does_not_exist_for_osint_config.json") + assert config.environment.max_steps > 0 + assert config.runtime.default_episodes > 0 + + +def test_shared_config_parses_swarm_and_seeding(tmp_path: Path): + path = tmp_path / "shared.json" + path.write_text( + json.dumps( + { + "environment": {"seed": 19, "max_steps": 9}, + "dataset": { + "mode": "metaqa", + "metaqa_root": "metaQA", + "metaqa_kb_path": "metaQA/kb.txt", + "metaqa_variant": "vanilla", + "metaqa_hops": ["1-hop", "2-hop", "3-hop"], + "metaqa_splits": ["train", "dev", "test"], + }, + "swarm": {"enabled": True, "max_agents": 3, "max_breadth": 2, "max_width": 2, "max_depth": 2}, + "seeding": { + "seeded_questions": [ + { + "question": "Which canonical user owns alias alias_seed_001?", + "answer": "user_seed_001", + } + ], + "llm_generation_parallel": True, + "llm_generation_workers": 4, + "llm_generation_retries": 3, + "allow_template_fallback_on_llm_failure": False + }, + "runtime": {"default_episodes": 5}, + "llm": {"provider": "ollama", "model": "qwen3:2b", "timeout_seconds": 333}, + } + ), + encoding="utf-8", + ) + + config = load_shared_config(path) + assert config.environment.seed == 19 + assert config.environment.swarm.enabled is True + assert config.environment.swarm.max_width == 2 + assert config.environment.dataset_mode == "metaqa" + assert config.environment.metaqa_root == "metaQA" + assert config.environment.metaqa_kb_path == "metaQA/kb.txt" + assert config.environment.metaqa_variant == "vanilla" + assert config.environment.metaqa_hops == ["1-hop", "2-hop", "3-hop"] + assert config.environment.metaqa_splits == ["train", "dev", "test"] + assert len(config.environment.seeding.seeded_questions) == 1 + assert config.runtime.default_episodes == 5 + assert config.environment.llm.provider == "ollama" + assert 
config.environment.llm.model == "qwen3:2b" + assert config.environment.llm.timeout_seconds == 333 + assert config.environment.seeding.llm_generation_parallel is True + assert config.environment.seeding.llm_generation_workers == 4 + assert config.environment.seeding.llm_generation_retries == 3 + assert config.environment.seeding.allow_template_fallback_on_llm_failure is False + + +def test_load_seeding_config_supports_top_level_object(tmp_path: Path): + path = tmp_path / "seeding.json" + path.write_text( + json.dumps( + { + "seeded_nodes": [ + {"node_id": "alias_seed_1", "node_type": "alias", "attrs": {"handle": "@seed"}}, + {"node_id": "user_seed_1", "node_type": "user", "attrs": {"name": "Seed"}}, + ], + "seeded_edges": [{"src": "alias_seed_1", "rel": "alias_of", "dst": "user_seed_1"}], + "seeded_questions": [{"question": "Which canonical user owns alias alias_seed_1?", "answer": "user_seed_1"}], + } + ), + encoding="utf-8", + ) + + seeding = load_seeding_config(path) + assert len(seeding.seeded_nodes) == 2 + assert len(seeding.seeded_edges) == 1 + assert seeding.seeded_questions[0].answer == "user_seed_1" diff --git a/tests/test_dashboard.py b/tests/test_dashboard.py new file mode 100644 index 0000000000000000000000000000000000000000..8b396dae7e7b600524d9bc4691fb678940820fae --- /dev/null +++ b/tests/test_dashboard.py @@ -0,0 +1,26 @@ +from pathlib import Path + +from osint_env.domain.models import EnvironmentConfig +from osint_env.env.environment import OSINTEnvironment +from osint_env.viz import export_dashboard + + +def test_dashboard_export(tmp_path: Path): + env = OSINTEnvironment(EnvironmentConfig(seed=9, n_users=14)) + env.reset() + + out = tmp_path / "dashboard.html" + path = export_dashboard( + env=env, + evaluation={"summary": {"leaderboard_score": 0.0, "task_success_rate": 0.0, "avg_graph_f1": 0.0, "tool_efficiency": 0.0, "deanonymization_accuracy": 0.0, "avg_reward": 0.0}, "episodes": []}, + leaderboard_records=[], + output_path=str(out), + ) + + 
assert path.endswith("dashboard.html") + text = out.read_text(encoding="utf-8") + assert "OSINT Benchmark Dashboard" in text + assert "Canonical Graph" in text + assert "Original Database Explorer" in text + assert "Benchmark Leaderboard" in text + assert "Episode Explorer" in text diff --git a/tests/test_environment.py b/tests/test_environment.py new file mode 100644 index 0000000000000000000000000000000000000000..1a36bd9e6c0404640691a90868f6152abcb13a7d --- /dev/null +++ b/tests/test_environment.py @@ -0,0 +1,39 @@ +from osint_env.domain.models import Action, ActionType, EnvironmentConfig +from osint_env.env.environment import OSINTEnvironment + + +def test_episode_flow(): + env = OSINTEnvironment(EnvironmentConfig(max_steps=5, seed=5)) + obs = env.reset() + assert "question" in obs.task + assert isinstance(obs.task.get("grader"), dict) + assert "type" in obs.task["grader"] + obs, r1, done, _ = env.step(Action(ActionType.CALL_TOOL, {"tool_name": "search_posts", "args": {"query": "Update"}})) + assert done is False + assert isinstance(r1, float) + _, r2, done, info = env.step(Action(ActionType.ANSWER, {"answer": "unknown"})) + assert done is True + assert "total_reward" in info + assert isinstance(r2, float) + + +def test_search_memory_tool_returns_results_after_tool_use(): + env = OSINTEnvironment(EnvironmentConfig(max_steps=6, seed=5)) + env.reset() + env.step(Action(ActionType.CALL_TOOL, {"tool_name": "search_posts", "args": {"query": "Update"}})) + obs, reward, done, _ = env.step( + Action(ActionType.CALL_TOOL, {"tool_name": "search_memory", "args": {"query": "Update", "k": 3}}) + ) + assert done is False + assert isinstance(reward, float) + assert obs.tool_outputs[-1]["tool"] == "search_memory" + assert obs.tool_outputs[-1]["output"]["count"] >= 1 + + +def test_invalid_tool_call_does_not_crash_episode(): + env = OSINTEnvironment(EnvironmentConfig(max_steps=4, seed=8)) + env.reset() + _, reward, done, info = env.step(Action(ActionType.CALL_TOOL, {"tool_name": 
"no_such_tool", "args": {}})) + assert done is False + assert reward < 0 + assert "invalid_tool_penalty" in info["reward_components"] diff --git a/tests/test_eval.py b/tests/test_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..ad29cfb3eb9f633214a478667fa9dbbb9bb1d036 --- /dev/null +++ b/tests/test_eval.py @@ -0,0 +1,33 @@ +from osint_env.domain.models import EnvironmentConfig, SwarmConfig +from osint_env.env.environment import OSINTEnvironment +from osint_env.eval.runner import run_evaluation + + +def test_eval_runner(): + env = OSINTEnvironment(EnvironmentConfig(seed=17)) + result = run_evaluation(env, episodes=3) + assert "task_success_rate" in result + assert "deanonymization_accuracy" in result + assert "leaderboard_score" in result + assert "avg_knowledge_indexing_reward" in result + + +def test_eval_runner_swarm_mode(): + env = OSINTEnvironment( + EnvironmentConfig(seed=17, swarm=SwarmConfig(enabled=True, max_agents=3, max_breadth=2, max_width=2, max_depth=2)) + ) + result = run_evaluation(env, episodes=2) + assert "spawn_signal" in result + assert "avg_spawn_count" in result + + +def test_eval_runner_details_include_episode_answers(): + env = OSINTEnvironment(EnvironmentConfig(seed=17)) + result = run_evaluation(env, episodes=2, return_details=True) + assert "episodes" in result + assert len(result["episodes"]) == 2 + + row = result["episodes"][0] + assert "question" in row + assert "task_answer" in row + assert "agent_answer" in row diff --git a/tests/test_fixed_levels_dataset.py b/tests/test_fixed_levels_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..50e834694b01613c20a9c176f4b41b02dc7d99fb --- /dev/null +++ b/tests/test_fixed_levels_dataset.py @@ -0,0 +1,18 @@ +import json +from collections import Counter +from pathlib import Path + + +def test_fixed_levels_seed_has_30_questions_and_target_node_spans(): + path = Path("datasets/fixed_levels/seed_fixed_levels.json") + payload = 
json.loads(path.read_text(encoding="utf-8")) + questions = payload["seeding"]["seeded_questions"] + + counts = Counter(q["metadata"]["difficulty"] for q in questions) + assert counts == {"easy": 10, "mid": 10, "high": 10} + + mid_support_nodes = [int(q["metadata"]["support_nodes"]) for q in questions if q["metadata"]["difficulty"] == "mid"] + high_support_nodes = [int(q["metadata"]["support_nodes"]) for q in questions if q["metadata"]["difficulty"] == "high"] + + assert all(15 <= value <= 20 for value in mid_support_nodes) + assert all(48 <= value <= 55 for value in high_support_nodes) diff --git a/tests/test_generator.py b/tests/test_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..8e603f4b4d9da3dd184371ed14afca8a6a9d9c2d --- /dev/null +++ b/tests/test_generator.py @@ -0,0 +1,164 @@ +import json +import re +from threading import Lock + +from osint_env.data.generator import ( + DatasetGenerator, + build_swarm_v2_canonical_subgraph, + build_swarm_v2_path_candidates, + build_swarm_v2_tool_trace, + emit_swarm_v2_question, + select_swarm_v2_answer, + trace_swarm_v2_path, +) +from osint_env.domain.models import EnvironmentConfig +from osint_env.llm.interface import LLMResponse + + +class SharedContextLLM: + def __init__(self): + self.prompts: list[str] = [] + self._lock = Lock() + + def generate(self, messages, tools): + prompt = str(messages[0].get("content", "")) if messages else "" + with self._lock: + self.prompts.append(prompt) + + if "SEED_GRAPH_EXPANSION_AGENT" in prompt: + worker_match = re.search(r"worker_id:\s*(\d+)", prompt) + worker_idx = int(worker_match.group(1)) if worker_match else 0 + payload = { + "edges": [ + { + "src": "user_0", + "rel": f"llm_rel_{worker_idx}", + "dst": "user_1", + "confidence": 0.9, + } + ] + } + return LLMResponse(content=json.dumps(payload), tool_calls=[]) + + if "SEED_TASK_EXPANSION_AGENT" in prompt: + worker_match = re.search(r"worker_id:\s*(\d+)", prompt) + worker_idx = 
int(worker_match.group(1)) if worker_match else 0 + budget_match = re.search(r"task_budget:\s*(\d+)", prompt) + task_budget = int(budget_match.group(1)) if budget_match else 1 + tasks = [] + for local_idx in range(max(1, task_budget)): + tasks.append( + { + "task_type": "identity_resolution", + "question": f"Which canonical user is tied to alias alias_seed_{worker_idx}_{local_idx}?", + "answer": "user_1", + "supporting_edges": [ + { + "src": "alias_seed_0", + "rel": "alias_of", + "dst": "user_1", + "confidence": 0.95, + } + ], + } + ) + payload = {"tasks": tasks} + return LLMResponse(content=json.dumps(payload), tool_calls=[]) + + return LLMResponse(content="{}", tool_calls=[]) + + +def test_generator_outputs(): + gen = DatasetGenerator(EnvironmentConfig(n_users=20, seed=11)) + graph = gen.build_canonical_graph() + views = gen.build_platform_views(graph) + tasks = gen.generate_tasks(graph, views, count=5) + assert len(graph.nodes) >= 20 + assert len(views.microblog_posts) >= 20 + assert len(tasks) == 5 + + +def test_seeded_views_include_seeded_posts_and_threads(): + from osint_env.config import clone_environment_config, load_seeding_config, load_shared_config + + shared = load_shared_config("datasets/fixed_levels/shared_config_fixed_levels.json") + cfg = clone_environment_config(shared.environment) + cfg.seeding = load_seeding_config("datasets/fixed_levels/seed_fixed_levels.json") + cfg.llm.provider = "mock" + + gen = DatasetGenerator(cfg) + graph = gen.build_canonical_graph() + views = gen.build_platform_views(graph) + + seeded_post = next((post for post in views.microblog_posts if post["post_id"] == "post_midnight_manifest"), None) + seeded_thread = next((thread for thread in views.forum_threads if thread["thread_id"] == "thr_supply_leak"), None) + + assert seeded_post is not None + assert "loc_dockyard17" in seeded_post["references"] + assert seeded_thread is not None + assert "org_northbridge_logistics" in seeded_thread["references"] + + +def 
test_graph_generation_uses_parallel_shared_context_workers(): + cfg = EnvironmentConfig(n_users=12, seed=9) + cfg.seeding.llm_generate_remaining_graph = True + cfg.seeding.llm_generated_edge_budget = 4 + cfg.seeding.llm_generate_remaining_tasks = False + cfg.seeding.llm_generation_parallel = True + cfg.seeding.llm_generation_workers = 3 + cfg.seeding.llm_generation_retries = 1 + cfg.seeding.allow_template_fallback_on_llm_failure = False + + llm = SharedContextLLM() + gen = DatasetGenerator(cfg, llm=llm) + graph = gen.build_canonical_graph() + + assert any(edge.rel.startswith("llm_rel_") for edge in graph.edges) + graph_prompts = [prompt for prompt in llm.prompts if "SEED_GRAPH_EXPANSION_AGENT" in prompt] + assert len(graph_prompts) >= 2 + assert all("SHARED_CONTEXT" in prompt for prompt in graph_prompts) + + +def test_task_generation_uses_parallel_shared_context_workers(): + cfg = EnvironmentConfig(n_users=12, seed=13) + cfg.seeding.llm_generate_remaining_graph = False + cfg.seeding.llm_generate_remaining_tasks = True + cfg.seeding.llm_generated_task_budget = 4 + cfg.seeding.llm_generation_parallel = True + cfg.seeding.llm_generation_workers = 3 + cfg.seeding.llm_generation_retries = 1 + cfg.seeding.allow_template_fallback_on_llm_failure = False + + llm = SharedContextLLM() + gen = DatasetGenerator(cfg, llm=llm) + graph = gen.build_canonical_graph() + views = gen.build_platform_views(graph) + tasks = gen.generate_tasks(graph, views, count=4) + + assert len(tasks) == 4 + assert any(task.metadata.get("shared_context") for task in tasks) + task_prompts = [prompt for prompt in llm.prompts if "SEED_TASK_EXPANSION_AGENT" in prompt] + assert len(task_prompts) >= 2 + assert all("SHARED_CONTEXT" in prompt for prompt in task_prompts) + + +def test_swarm_v2_path_tools_replay_a_valid_multi_hop_trace(): + gen = DatasetGenerator(EnvironmentConfig(n_users=20, seed=17)) + graph = gen.build_canonical_graph() + candidates = build_swarm_v2_path_candidates(graph, gen.rng, count=4, 
min_hops=2, max_hops=3) + + assert candidates + traced = trace_swarm_v2_path(graph, candidates[0]) + assert traced + assert len(traced) >= 2 + + question = emit_swarm_v2_question(traced) + answer = select_swarm_v2_answer(traced) + tool_trace = build_swarm_v2_tool_trace(graph, traced) + canonical = build_swarm_v2_canonical_subgraph(graph, traced, max_extra_edges=2) + + assert question.startswith("If you start at") + assert answer == traced[-1].dst + assert any(call["tool_name"] == "trace_path" for call in tool_trace) + assert canonical["path"] + assert canonical["answer"] == answer diff --git a/tests/test_inference.py b/tests/test_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..7059162c76bfda1331382d1723089c0318449e74 --- /dev/null +++ b/tests/test_inference.py @@ -0,0 +1,74 @@ +import inference +from inference import _format_action, _looks_like_placeholder_api_key, _tool_result_message +from osint_env.domain.models import EnvironmentConfig + + +def test_placeholder_api_key_detection(): + assert _looks_like_placeholder_api_key("your_openai_api_key") is True + assert _looks_like_placeholder_api_key("sk-your-real-openai-key") is True + assert _looks_like_placeholder_api_key("replace-me") is True + assert _looks_like_placeholder_api_key("sk-proj-realistic-looking-token") is False + + +def test_tool_result_message_reuses_assistant_tool_call_id(): + assistant_message = { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_123", + "type": "function", + "function": {"name": "get_post", "arguments": "{\"post_id\":\"post_midnight_manifest\"}"}, + } + ], + } + result = {"reward": 0.1, "done": False} + tool_message = _tool_result_message(assistant_message, result) + assert tool_message is not None + assert tool_message["tool_call_id"] == "call_123" + assert tool_message["role"] == "tool" + + +def test_action_formatter_matches_single_line_style(): + assert _format_action({"action_type": "ANSWER", "payload": {"answer": 
"user_bharat"}}) == "answer(user_bharat)" + assert _format_action( + { + "action_type": "CALL_TOOL", + "payload": {"tool_name": "get_post", "args": {"post_id": "post_midnight_manifest"}}, + } + ) == "get_post(post_id=post_midnight_manifest)" + + +def test_resolve_environment_config_applies_metaqa_overrides(monkeypatch): + base_cfg = EnvironmentConfig() + base_cfg.dataset_mode = "canonical" + + monkeypatch.setattr(inference, "load_shared_config", lambda _path: type("S", (), {"environment": base_cfg})()) + monkeypatch.setattr(inference, "clone_environment_config", lambda cfg: cfg) + monkeypatch.setattr(inference, "SEED_FILE", "") + + monkeypatch.setattr(inference, "DATASET_MODE", "metaqa") + monkeypatch.setattr(inference, "METAQA_ROOT", "metaQA") + monkeypatch.setattr(inference, "METAQA_KB_PATH", "metaQA/kb.txt") + monkeypatch.setattr(inference, "METAQA_VARIANT", "vanilla") + monkeypatch.setattr(inference, "METAQA_HOPS_RAW", "1-hop,2-hop,3-hop") + monkeypatch.setattr(inference, "METAQA_SPLITS_RAW", "train") + + monkeypatch.setattr(inference, "HF_TOKEN", "token") + monkeypatch.setattr(inference, "API_KEY", "") + monkeypatch.setattr(inference, "OPENAI_API_KEY", "") + monkeypatch.setattr(inference, "OPENAI_API_KEY_ENV", "OPENAI_API_KEY") + monkeypatch.setattr(inference, "API_BASE_URL", "https://api.openai.com/v1") + monkeypatch.setattr(inference, "OPENAI_BASE_URL", "") + monkeypatch.setattr(inference, "HF_SPACE_URL", "") + monkeypatch.setattr(inference, "MODEL_NAME", "gpt-5.4") + monkeypatch.setattr(inference, "LLM_TIMEOUT_SECONDS", 0) + + cfg = inference._resolve_environment_config() + + assert cfg.dataset_mode == "metaqa" + assert cfg.metaqa_root == "metaQA" + assert cfg.metaqa_kb_path == "metaQA/kb.txt" + assert cfg.metaqa_variant == "vanilla" + assert cfg.metaqa_hops == ["1-hop", "2-hop", "3-hop"] + assert cfg.metaqa_splits == ["train"] diff --git a/tests/test_leaderboard.py b/tests/test_leaderboard.py new file mode 100644 index 
0000000000000000000000000000000000000000..7752b8646cc1785e505fd6422373a7e30fb26449 --- /dev/null +++ b/tests/test_leaderboard.py @@ -0,0 +1,47 @@ +from pathlib import Path + +from osint_env.eval.leaderboard import append_leaderboard_record, load_leaderboard, render_leaderboard_table, sorted_leaderboard + + +def test_leaderboard_roundtrip(tmp_path: Path): + board = tmp_path / "leaderboard.json" + append_leaderboard_record( + path=board, + summary={ + "leaderboard_score": 0.42, + "task_success_rate": 0.5, + "avg_graph_f1": 0.4, + "avg_reward": 0.1, + "tool_efficiency": 0.9, + "retrieval_signal": 0.3, + "structural_signal": 0.4, + }, + episodes=5, + run_name="baseline", + ) + append_leaderboard_record( + path=board, + summary={ + "leaderboard_score": 0.75, + "task_success_rate": 0.7, + "avg_graph_f1": 0.6, + "avg_reward": 0.5, + "tool_efficiency": 0.8, + "retrieval_signal": 0.6, + "structural_signal": 0.7, + }, + episodes=5, + run_name="improved", + ) + + records = load_leaderboard(board) + ranked = sorted_leaderboard(records) + assert len(records) == 2 + assert ranked[0]["run_name"] == "improved" + + ranked_by_success = sorted_leaderboard(records, sort_by="task_success_rate") + assert ranked_by_success[0]["run_name"] == "improved" + + table = render_leaderboard_table(records, top_k=5) + assert "| rank | run |" in table + assert "retrieval" in table diff --git a/tests/test_llm_interface.py b/tests/test_llm_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..c615c5bddac5e7217996bb6632e0a88baa018f57 --- /dev/null +++ b/tests/test_llm_interface.py @@ -0,0 +1,44 @@ +import os + +import pytest +import requests + +from osint_env.domain.models import LLMConfig +from osint_env.llm.interface import OllamaLLMClient, RuleBasedMockLLM, build_llm_client + + +def test_build_llm_client_mock_default(): + client = build_llm_client(LLMConfig(provider="mock")) + assert isinstance(client, RuleBasedMockLLM) + + +def 
test_build_llm_client_openai_requires_key(monkeypatch: pytest.MonkeyPatch): + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + with pytest.raises(ValueError): + build_llm_client(LLMConfig(provider="openai", openai_api_key="", openai_api_key_env="OPENAI_API_KEY")) + + +def test_build_llm_client_openai_with_key(monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("OPENAI_API_KEY", "test-key") + cfg = LLMConfig(provider="openai", model="gpt-4o-mini", openai_api_key_env="OPENAI_API_KEY") + # Constructing should not fail when a key is present; actual API call is not made in this test. + client = build_llm_client(cfg) + assert client is not None + + +def test_openai_key_can_come_from_config_value(monkeypatch: pytest.MonkeyPatch): + monkeypatch.delenv("OPENAI_API_KEY", raising=False) + cfg = LLMConfig(provider="openai", model="gpt-4o-mini", openai_api_key="cfg-key") + client = build_llm_client(cfg) + assert client is not None + + +def test_ollama_client_gracefully_handles_request_failure(monkeypatch: pytest.MonkeyPatch): + def _raise(*args, **kwargs): + raise requests.exceptions.ReadTimeout("timed out") + + monkeypatch.setattr("osint_env.llm.interface.requests.post", _raise) + client = OllamaLLMClient(model="qwen3:2b", timeout_seconds=1) + response = client.generate([{"role": "system", "content": "ping"}], tools=[]) + assert response.content == "" + assert response.tool_calls == [] diff --git a/tests/test_metaqa_dataset.py b/tests/test_metaqa_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..e73a58b991136b93536a19e7d11447d6cbc3ac2a --- /dev/null +++ b/tests/test_metaqa_dataset.py @@ -0,0 +1,66 @@ +from pathlib import Path + +from osint_env.data.generator import DatasetGenerator +from osint_env.domain.models import EnvironmentConfig + + +def _write_metaqa_fixture(root: Path) -> None: + root.mkdir(parents=True, exist_ok=True) + (root / "kb.txt").write_text( + "\n".join( + [ + "Movie A|starred_actors|Actor X", + "Movie 
B|starred_actors|Actor X", + "Movie A|directed_by|Director D", + "Movie C|directed_by|Director D", + "Movie C|release_year|2002", + ] + ), + encoding="utf-8", + ) + + rows = { + "1-hop": ("what movies did [Actor X] act in\tMovie A|Movie B\n", "actor_to_movie\n"), + "2-hop": ("which films share the director of [Movie A]\tMovie C\n", "movie_to_director_to_movie\n"), + "3-hop": ( + "which release year corresponds to films with same director as [Movie A]\t2002\n", + "movie_to_director_to_movie_to_year\n", + ), + } + + for hop, (qa_line, qtype_line) in rows.items(): + qa_dir = root / hop / "vanilla" + qa_dir.mkdir(parents=True, exist_ok=True) + (qa_dir / "qa_train.txt").write_text(qa_line, encoding="utf-8") + (root / hop / "qa_train_qtype.txt").write_text(qtype_line, encoding="utf-8") + + +def test_metaqa_mode_builds_graph_and_hop_tasks(tmp_path: Path): + metaqa_root = tmp_path / "metaQA" + _write_metaqa_fixture(metaqa_root) + + cfg = EnvironmentConfig( + seed=5, + dataset_mode="metaqa", + metaqa_root=str(metaqa_root), + metaqa_variant="vanilla", + metaqa_hops=["1-hop", "2-hop", "3-hop"], + metaqa_splits=["train"], + ) + + gen = DatasetGenerator(cfg) + graph = gen.build_canonical_graph() + views = gen.build_platform_views(graph) + tasks = gen.generate_tasks(graph, views, count=24) + + assert len(graph.nodes) >= 5 + assert any(edge.rel == "directed_by" for edge in graph.edges) + assert any(post["post_id"].startswith("post_metaqa_") for post in views.microblog_posts) + assert any(profile["user_id"] == "Actor X" for profile in views.profiles) + + hop_labels = {str(task.metadata.get("hop", "")) for task in tasks} + difficulties = {str(task.metadata.get("difficulty", "")) for task in tasks} + + assert hop_labels == {"1-hop", "2-hop", "3-hop"} + assert difficulties == {"easy", "medium", "hard"} + assert all(task.supporting_edges for task in tasks) diff --git a/tests/test_openai_baseline.py b/tests/test_openai_baseline.py new file mode 100644 index 
0000000000000000000000000000000000000000..d11342d26c88ea41b0119a261c0f8b5d4a946cde --- /dev/null +++ b/tests/test_openai_baseline.py @@ -0,0 +1,30 @@ +from osint_env.baselines.openai_runner import OpenAIBaselineConfig, OpenAIBaselineRunner, build_action_tools + + +def test_openai_baseline_toolset_contains_answer_and_graph_actions(): + tools = build_action_tools() + names = {tool["function"]["name"] for tool in tools} + assert "submit_answer" in names + assert "add_edge" in names + assert "search_memory" in names + assert "get_post" in names + + +def test_gpt5_request_kwargs_avoid_temperature_and_use_max_completion_tokens(): + runner = OpenAIBaselineRunner.__new__(OpenAIBaselineRunner) + runner.config = OpenAIBaselineConfig(model="gpt-5-nano", max_tokens=321, temperature=0.0, seed=7) + runner.tools = build_action_tools() + kwargs = runner._request_kwargs(messages=[{"role": "user", "content": "hi"}], episode_index=0) + assert kwargs["max_completion_tokens"] == 321 + assert kwargs["reasoning_effort"] == "none" + assert "temperature" not in kwargs + + +def test_gpt54_mini_request_kwargs_skip_reasoning_effort_for_chat_completions(): + runner = OpenAIBaselineRunner.__new__(OpenAIBaselineRunner) + runner.config = OpenAIBaselineConfig(model="gpt-5.4-mini", max_tokens=321, temperature=0.0, seed=7) + runner.tools = build_action_tools() + kwargs = runner._request_kwargs(messages=[{"role": "user", "content": "hi"}], episode_index=0) + assert kwargs["max_completion_tokens"] == 321 + assert "reasoning_effort" not in kwargs + assert "temperature" not in kwargs diff --git a/tests/test_reward.py b/tests/test_reward.py new file mode 100644 index 0000000000000000000000000000000000000000..03bb251136dd7eb850a07d97fbc96fb82b11f981 --- /dev/null +++ b/tests/test_reward.py @@ -0,0 +1,53 @@ +from osint_env.domain.models import Edge, EnvironmentConfig +from osint_env.env.environment import OSINTEnvironment +from osint_env.env.reward import build_reward_model, compute_answer_reward, 
compute_edge_reward + + +def test_composite_edge_reward_returns_breakdown(): + env = OSINTEnvironment(EnvironmentConfig(seed=13, n_users=16, max_steps=6)) + obs = env.reset() + task = env.state.task + + model = build_reward_model(env.graph) + edge = task.supporting_edges[0] + breakdown = compute_edge_reward( + edge=edge, + task=task, + existing_edges=[], + step_count=1, + model=model, + graph=env.graph, + ) + assert isinstance(breakdown.total, float) + assert breakdown.global_accuracy > 0 + assert isinstance(breakdown.connectivity_gain, float) + + +def test_answer_reward_uses_graph_and_tool_context(): + env = OSINTEnvironment(EnvironmentConfig(seed=21, n_users=18, max_steps=6)) + env.reset() + task = env.state.task + + pred_edges = [Edge(task.supporting_edges[0].src, task.supporting_edges[0].rel, task.supporting_edges[0].dst)] + tool_outputs = [{"tool": "get_profile", "output": {"result": {"user_id": task.answer}}}] + + good = compute_answer_reward( + proposed_answer=task.answer, + task=task, + pred_edges=pred_edges, + tool_outputs=tool_outputs, + step_count=2, + ) + bad = compute_answer_reward( + proposed_answer="wrong", + task=task, + pred_edges=[], + tool_outputs=[], + step_count=2, + ) + + assert good.total > bad.total + assert good.graph_f1 >= 0 + assert isinstance(good.relation_informativeness, float) + assert isinstance(good.entity_informativeness, float) + assert isinstance(good.repetition_penalty, float) diff --git a/tests/test_seeding.py b/tests/test_seeding.py new file mode 100644 index 0000000000000000000000000000000000000000..7381a3e2ad12bf469693ae39ef75558e8a6ec08c --- /dev/null +++ b/tests/test_seeding.py @@ -0,0 +1,40 @@ +from osint_env.domain.models import ( + EnvironmentConfig, + NodeType, + SeedEdgeSpec, + SeedNodeSpec, + SeedQuestionSpec, + SeedingConfig, +) +from osint_env.env.environment import OSINTEnvironment + + +def test_environment_includes_seeded_graph_and_questions(): + seeding = SeedingConfig( + seeded_nodes=[ + 
SeedNodeSpec(node_id="alias_seed_001", node_type=NodeType.ALIAS, attrs={"handle": "@seed001"}), + SeedNodeSpec( + node_id="user_seed_001", + node_type=NodeType.USER, + attrs={"name": "Seed User", "org": "Helios Labs", "location": "Pune"}, + ), + ], + seeded_edges=[SeedEdgeSpec(src="alias_seed_001", rel="alias_of", dst="user_seed_001")], + seeded_questions=[ + SeedQuestionSpec( + question="Which canonical user owns alias alias_seed_001?", + answer="user_seed_001", + task_type="identity_resolution", + supporting_edges=[SeedEdgeSpec(src="alias_seed_001", rel="alias_of", dst="user_seed_001")], + ) + ], + llm_generate_remaining_graph=False, + llm_generate_remaining_tasks=False, + llm_generated_edge_budget=0, + llm_generated_task_budget=0, + ) + env = OSINTEnvironment(EnvironmentConfig(seed=33, n_users=12, seeding=seeding)) + + assert "alias_seed_001" in env.graph.nodes + assert any(edge.src == "alias_seed_001" and edge.rel == "alias_of" and edge.dst == "user_seed_001" for edge in env.graph.edges) + assert any("alias_seed_001" in task.question for task in env.tasks) diff --git a/tests/test_self_play_swarm_v2.py b/tests/test_self_play_swarm_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..0d60670aee05792fc5bc2a8825e907bf034dad68 --- /dev/null +++ b/tests/test_self_play_swarm_v2.py @@ -0,0 +1,305 @@ +import json +import random +from copy import deepcopy +from pathlib import Path + +from osint_env.data.generator import ( + build_swarm_v2_canonical_subgraph, + build_swarm_v2_path_candidates, + build_swarm_v2_tool_trace, + emit_swarm_v2_question, + select_swarm_v2_answer, +) +from osint_env.domain.models import CanonicalGraph, Edge, EnvironmentConfig, Node, NodeType +from osint_env.env.environment import OSINTEnvironment +from osint_env.training import SelfPlayTrainingConfig, run_adversarial_self_play +from osint_env.training.config import GeneratorRewardWeights +from osint_env.training.rewards import ( + GeneratorRewardFunction, + 
SwarmV2ReplayValidator, + parse_generated_task_completion, +) + + +class DummyJudge: + def __init__(self, answer: str): + self._answer = answer + + def answer(self, question: str) -> str: + del question + return self._answer + + +def _edge_payload(edge: Edge) -> dict[str, object]: + return { + "src": edge.src, + "rel": edge.rel, + "dst": edge.dst, + "confidence": float(edge.confidence), + } + + +def _build_valid_candidate_payload(env: OSINTEnvironment, cfg: SelfPlayTrainingConfig) -> dict[str, object]: + path_candidates = build_swarm_v2_path_candidates( + env.graph, + rng=random.Random(17), + count=1, + min_hops=2, + max_hops=cfg.swarm_v2.validation.max_path_hops, + ) + assert path_candidates + path_edges = path_candidates[0] + question = emit_swarm_v2_question(path_edges) + answer = select_swarm_v2_answer(path_edges) + return { + "canonical_graph": build_swarm_v2_canonical_subgraph( + env.graph, + path_edges, + max_extra_edges=max(0, cfg.swarm_v2.shared_context.max_edges - len(path_edges)), + ), + "question": question, + "answer": answer, + "task_type": "swarm_v2_trace", + "supporting_edges": [_edge_payload(edge) for edge in path_edges], + "tool_trace": build_swarm_v2_tool_trace(env.graph, path_edges), + "subagent_outputs": [ + f"path_agent_{idx}: {edge.src} --{edge.rel}--> {edge.dst}" + for idx, edge in enumerate(path_edges) + ] + + ["question_agent: deterministic relation-path question"], + "orchestrator": { + "spawn_count": 3, + "finished_subtasks": 3, + "critical_steps": 2, + "breadth": 3, + "depth": 1, + }, + } + + +def test_swarm_v2_replay_validator_accepts_valid_candidate_and_rejects_invalid_cases(): + cfg = SelfPlayTrainingConfig(pipeline_mode="swarm_v2") + env = OSINTEnvironment(EnvironmentConfig(seed=23, n_users=18, max_steps=6)) + payload = _build_valid_candidate_payload(env, cfg) + + validator = SwarmV2ReplayValidator( + graph=env.graph, + validation=cfg.swarm_v2.validation, + shared_context=cfg.swarm_v2.shared_context, + seen_questions=[], + ) + valid 
= validator.validate(parse_generated_task_completion(json.dumps(payload))) + assert valid.is_valid is True + + leaked_payload = deepcopy(payload) + leaked_payload["question"] = f"{payload['question']} {payload['answer']}" + leaked = validator.validate(parse_generated_task_completion(json.dumps(leaked_payload))) + assert leaked.is_valid is False + assert "answer_leakage" in leaked.reasons + + no_trace_payload = deepcopy(payload) + no_trace_payload["tool_trace"] = [] + no_trace = validator.validate(parse_generated_task_completion(json.dumps(no_trace_payload))) + assert no_trace.is_valid is False + assert "non_replayable_tool_calls" in no_trace.reasons + + unseen_payload = deepcopy(payload) + unseen_payload["supporting_edges"][0]["dst"] = "user_missing" + unseen = validator.validate(parse_generated_task_completion(json.dumps(unseen_payload))) + assert unseen.is_valid is False + assert "unseen_nodes_or_edges" in unseen.reasons + + +def test_swarm_v2_replay_validator_rejects_non_unique_paths(): + graph = CanonicalGraph( + nodes={ + "user_root": Node("user_root", NodeType.USER, {}), + "user_mid1": Node("user_mid1", NodeType.USER, {}), + "user_mid2": Node("user_mid2", NodeType.USER, {}), + "user_target": Node("user_target", NodeType.USER, {}), + }, + edges=[ + Edge("user_root", "linked_to", "user_mid1"), + Edge("user_root", "linked_to", "user_mid2"), + Edge("user_mid1", "knows", "user_target"), + Edge("user_mid2", "knows", "user_target"), + ], + ) + cfg = SelfPlayTrainingConfig(pipeline_mode="swarm_v2") + ambiguous_path = [ + Edge("user_root", "linked_to", "user_mid1"), + Edge("user_mid1", "knows", "user_target"), + ] + payload = { + "canonical_graph": build_swarm_v2_canonical_subgraph(graph, ambiguous_path, max_extra_edges=1), + "question": emit_swarm_v2_question(ambiguous_path), + "answer": select_swarm_v2_answer(ambiguous_path), + "task_type": "swarm_v2_trace", + "supporting_edges": [_edge_payload(edge) for edge in ambiguous_path], + "tool_trace": 
build_swarm_v2_tool_trace(graph, ambiguous_path), + "subagent_outputs": ["path_agent: ambiguous linked_to -> knows trace"], + "orchestrator": {"spawn_count": 2, "finished_subtasks": 2, "critical_steps": 2, "breadth": 2, "depth": 1}, + } + validator = SwarmV2ReplayValidator( + graph=graph, + validation=cfg.swarm_v2.validation, + shared_context=cfg.swarm_v2.shared_context, + seen_questions=[], + ) + result = validator.validate(parse_generated_task_completion(json.dumps(payload))) + assert result.is_valid is False + assert "non_unique_derivation_path" in result.reasons + + +def test_swarm_v2_generator_reward_prefers_valid_parallel_diverse_tasks(): + cfg = SelfPlayTrainingConfig(pipeline_mode="swarm_v2") + env = OSINTEnvironment(EnvironmentConfig(seed=29, n_users=18, max_steps=6)) + payload = _build_valid_candidate_payload(env, cfg) + + reward_fn = GeneratorRewardFunction( + graph=env.graph, + answerer_judge=DummyJudge(answer="wrong_answer"), + weights=GeneratorRewardWeights(), + max_support_edges=cfg.swarm_v2.validation.max_support_edges, + pipeline_mode="swarm_v2", + swarm_v2_validation=cfg.swarm_v2.validation, + swarm_v2_shared_context=cfg.swarm_v2.shared_context, + parl_max_parallel_hint=cfg.swarm_v2.generator_swarm.max_agents, + ) + + spawn_only = deepcopy(payload) + spawn_only["orchestrator"]["spawn_count"] = 6 + spawn_only["orchestrator"]["finished_subtasks"] = 0 + spawn_only["orchestrator"]["critical_steps"] = 6 + + duplicate_workers = deepcopy(payload) + duplicate_workers["subagent_outputs"] = ["same worker trace"] * 4 + + answer_leak = deepcopy(payload) + answer_leak["question"] = f"{payload['question']} {payload['answer']}" + + overflow = deepcopy(payload) + overflow["supporting_edges"] = payload["supporting_edges"] + payload["supporting_edges"] + + unsupported_answer = deepcopy(payload) + unsupported_answer["answer"] = "user_not_in_graph" + + serial_collapse = deepcopy(payload) + serial_collapse["orchestrator"] = { + "spawn_count": 1, + "finished_subtasks": 
1, + "critical_steps": 7, + "breadth": 1, + "depth": 1, + } + + scores = reward_fn( + completions=[ + json.dumps(payload), + json.dumps(spawn_only), + json.dumps(duplicate_workers), + json.dumps(answer_leak), + json.dumps(overflow), + json.dumps(unsupported_answer), + json.dumps(serial_collapse), + ] + ) + + assert scores[0] > scores[1] + assert scores[0] > scores[2] + assert scores[0] > scores[6] + assert scores[3] < 0 + assert scores[4] < 0 + assert scores[5] < 0 + + +def test_swarm_v2_generator_reward_grades_invalid_outputs_instead_of_constant_penalty(): + cfg = SelfPlayTrainingConfig(pipeline_mode="swarm_v2") + env = OSINTEnvironment(EnvironmentConfig(seed=31, n_users=18, max_steps=6)) + valid_payload = _build_valid_candidate_payload(env, cfg) + + reward_fn = GeneratorRewardFunction( + graph=env.graph, + answerer_judge=DummyJudge(answer="wrong_answer"), + weights=GeneratorRewardWeights(), + max_support_edges=cfg.swarm_v2.validation.max_support_edges, + pipeline_mode="swarm_v2", + swarm_v2_validation=cfg.swarm_v2.validation, + swarm_v2_shared_context=cfg.swarm_v2.shared_context, + parl_max_parallel_hint=cfg.swarm_v2.generator_swarm.max_agents, + ) + + missing_everything = "not json" + partial_json = json.dumps({"question": "Who is linked by this path?", "answer": valid_payload["answer"]}) + partial_edges = json.dumps( + { + "question": valid_payload["question"], + "answer": valid_payload["answer"], + "supporting_edges": valid_payload["supporting_edges"], + } + ) + + scores = reward_fn(completions=[missing_everything, partial_json, partial_edges, json.dumps(valid_payload)]) + + assert len(set(scores)) > 2 + assert scores[0] < scores[1] < scores[2] < scores[3] + assert reward_fn._debug_last_batch["batch_reward_std"] > 0.0 + assert reward_fn._debug_last_batch["valid_output_ratio"] == 0.25 + + +def test_swarm_v2_dry_run_writes_new_artifacts_and_preserves_legacy_contract(tmp_path: Path): + env_cfg = EnvironmentConfig(seed=11, n_users=14, max_steps=6) + train_cfg = 
SelfPlayTrainingConfig( + rounds=1, + output_dir=str(tmp_path / "self_play"), + dry_run=True, + pipeline_mode="swarm_v2", + generated_tasks_per_round=3, + generator_prompts_per_round=3, + ) + + payload = run_adversarial_self_play(env_config=env_cfg, training_config=train_cfg, dry_run=True) + assert payload["pipeline_mode"] == "swarm_v2" + assert len(payload["rounds"]) == 1 + + artifacts = payload["rounds"][0]["artifacts"] + for key in [ + "generator_dataset", + "answerer_dataset", + "generated_tasks", + "canonical_graph_candidates", + "replay_traces", + "validation_reports", + ]: + assert Path(artifacts[key]).exists() + loaded = json.loads(Path(artifacts[key]).read_text(encoding="utf-8")) + assert loaded is not None + + +def test_swarm_v2_fixed_canonical_mode_reuses_prompt_candidates(tmp_path: Path): + env_cfg = EnvironmentConfig(seed=19, n_users=14, max_steps=6) + train_cfg = SelfPlayTrainingConfig( + rounds=1, + output_dir=str(tmp_path / "self_play_fixed_canonical"), + dry_run=True, + pipeline_mode="swarm_v2", + canonical_graph_mode="fixed", + generated_tasks_per_round=3, + generator_prompts_per_round=3, + ) + + payload = run_adversarial_self_play(env_config=env_cfg, training_config=train_cfg, dry_run=True) + artifacts = payload["rounds"][0]["artifacts"] + candidates_payload = json.loads(Path(artifacts["canonical_graph_candidates"]).read_text(encoding="utf-8")) + generated_payload = json.loads(Path(artifacts["generated_tasks"]).read_text(encoding="utf-8")) + + expected_graphs = { + json.dumps((item.get("canonical_graph") if isinstance(item.get("canonical_graph"), dict) else item), sort_keys=True) + for item in candidates_payload + if isinstance(item, dict) + } + assert expected_graphs + + for task in generated_payload: + canonical_graph = ((task.get("metadata") or {}).get("canonical_graph")) or {} + assert json.dumps(canonical_graph, sort_keys=True) in expected_graphs diff --git a/tests/test_server.py b/tests/test_server.py new file mode 100644 index 
0000000000000000000000000000000000000000..6e3db4bda127b897b20c85ea9f69217b0ab15654 --- /dev/null +++ b/tests/test_server.py @@ -0,0 +1,235 @@ +import json +import os + +from fastapi.testclient import TestClient + +import server +from server import app + + +client = TestClient(app) + + +def test_server_health(): + response = client.get("/healthz") + assert response.status_code == 200 + assert response.json()["status"] == "ok" + + +def test_server_health_alias(): + response = client.get("/health") + assert response.status_code == 200 + assert response.json()["status"] == "ok" + + +def test_server_environment_metadata(): + response = client.get("/api/environment") + assert response.status_code == 200 + body = response.json() + assert "action_space" in body + assert "observation_space" in body + assert "summary" in body + + +def test_openenv_spec_and_tasks_endpoints(): + spec = client.get("/openenv.yaml") + assert spec.status_code == 200 + assert "reset" in spec.text + + tasks = client.get("/openenv/tasks") + assert tasks.status_code == 200 + body = tasks.json() + assert len(body) >= 3 + assert {"task_id", "task_type", "question", "difficulty"} <= set(body[0].keys()) + + +def test_openenv_reset_step_and_state_cycle(): + reset = client.post("/openenv/reset", json={"task_index": 0}) + assert reset.status_code == 200 + body = reset.json() + session_id = body["session_id"] + assert body["done"] is False + assert "question" in body["observation"]["task"] + + state = client.get(f"/openenv/state/{session_id}") + assert state.status_code == 200 + assert state.json()["session_id"] == session_id + + step = client.post( + "/openenv/step", + json={ + "session_id": session_id, + "action_type": "ANSWER", + "payload": {"answer": "unknown"}, + }, + ) + assert step.status_code == 200 + step_body = step.json() + assert step_body["session_id"] == session_id + assert step_body["done"] is True + assert "task_answer" in step_body["info"] + + +def test_openenv_reset_accepts_empty_body(): + 
reset = client.post("/openenv/reset") + assert reset.status_code == 200 + body = reset.json() + assert body["done"] is False + assert "session_id" in body + + +def test_openenv_reset_accepts_empty_json_body(): + reset = client.post( + "/openenv/reset", + data="", + headers={"Content-Type": "application/json"}, + ) + assert reset.status_code == 200 + body = reset.json() + assert body["done"] is False + assert "session_id" in body + + +def test_openenv_reset_trailing_slash_post_returns_json(): + reset = client.post( + "/openenv/reset/", + data="", + headers={"Content-Type": "application/json"}, + ) + assert reset.status_code == 200 + body = reset.json() + assert body["done"] is False + assert "session_id" in body + + +def test_openenv_step_accepts_nested_action_payload(): + reset = client.post("/openenv/reset", json={"task_index": 0}) + assert reset.status_code == 200 + session_id = reset.json()["session_id"] + + step = client.post( + "/openenv/step", + json={ + "session_id": session_id, + "action": { + "action_type": "ANSWER", + "payload": {"answer": "unknown"}, + }, + }, + ) + assert step.status_code == 200 + assert step.json()["done"] is True + + +def test_step_alias_uses_latest_session_when_session_id_missing(): + reset = client.post("/reset", json={"task_index": 0}) + assert reset.status_code == 200 + session_id = reset.json()["session_id"] + + step = client.post( + "/step", + json={ + "action_type": "ANSWER", + "payload": {"answer": "unknown"}, + }, + ) + assert step.status_code == 200 + body = step.json() + assert body["session_id"] == session_id + assert body["done"] is True + + +def test_state_alias_returns_latest_session(): + reset = client.post("/reset", json={"task_index": 0}) + assert reset.status_code == 200 + session_id = reset.json()["session_id"] + + state = client.get("/state") + assert state.status_code == 200 + body = state.json() + assert body["session_id"] == session_id + assert "task" in body["observation"] + + +def 
test_report_inference_updates_latest_evaluation_and_dashboard(tmp_path, monkeypatch): + latest_evaluation = tmp_path / "latest_evaluation.json" + space_dashboard = tmp_path / "space_dashboard.html" + + monkeypatch.setattr(server, "LATEST_EVALUATION_OUTPUT", latest_evaluation) + monkeypatch.setattr(server, "SPACE_DASHBOARD", space_dashboard) + monkeypatch.setattr(server, "load_leaderboard", lambda path: []) + monkeypatch.setattr(server, "export_dashboard", lambda env, evaluation, leaderboard_records, output_path: str(space_dashboard)) + + response = client.post( + "/openenv/report_inference", + json={ + "run": {"name": "inference_py_run"}, + "summary": {"leaderboard_score": 0.75, "task_success_rate": 1.0}, + "episodes": [ + { + "task_id": "seed_task_0", + "agent_answer": "user_bharat", + "graph_f1": 0.5, + "reward": 1.2, + "steps": 5, + "tool_calls": 4, + "success": 1, + } + ], + }, + ) + assert response.status_code == 200 + body = response.json() + assert body["status"] == "ok" + assert latest_evaluation.exists() + stored = json.loads(latest_evaluation.read_text(encoding="utf-8")) + assert stored["summary"]["leaderboard_score"] == 0.75 + assert stored["episodes"][0]["task_id"] == "seed_task_0" + assert stored["episodes"][0]["truth_edges"] + + +def test_space_snapshot_prefers_newer_evaluation_payload(tmp_path, monkeypatch): + baseline_path = tmp_path / "baseline.json" + evaluation_path = tmp_path / "evaluation.json" + baseline_dashboard = tmp_path / "baseline_dashboard.html" + space_dashboard = tmp_path / "space_dashboard.html" + + baseline_path.write_text( + json.dumps( + { + "run": {"dashboard_path": str(baseline_dashboard)}, + "summary": {"leaderboard_score": 0.1, "task_success_rate": 0.1}, + } + ), + encoding="utf-8", + ) + baseline_dashboard.write_text("baseline", encoding="utf-8") + evaluation_path.write_text( + json.dumps({"summary": {"leaderboard_score": 0.9, "task_success_rate": 0.9}, "episodes": []}), + encoding="utf-8", + ) + 
space_dashboard.write_text("space", encoding="utf-8") + os.utime(evaluation_path, (baseline_path.stat().st_atime + 5, baseline_path.stat().st_mtime + 5)) + + monkeypatch.setattr(server, "LATEST_BASELINE_OUTPUT", baseline_path) + monkeypatch.setattr(server, "LATEST_EVALUATION_OUTPUT", evaluation_path) + monkeypatch.setattr(server, "SPACE_DASHBOARD", space_dashboard) + monkeypatch.setattr( + server, + "_base_environment_snapshot", + lambda: { + "task_count": 30, + "difficulty_counts": {}, + "action_space": ["CALL_TOOL", "ADD_EDGE", "ANSWER"], + "observation_space": {}, + "task_types": [], + "config": {}, + }, + ) + monkeypatch.setattr(server, "_build_environment", lambda: object()) + monkeypatch.setattr(server, "export_dashboard", lambda env, evaluation, leaderboard_records, output_path: str(space_dashboard)) + + snapshot = server._space_snapshot() + assert snapshot["source"] == "latest_evaluation" + assert snapshot["summary"]["leaderboard_score"] == 0.9 + assert snapshot["dashboard_path"] == str(space_dashboard) diff --git a/tests/test_spawn_reward_hooks.py b/tests/test_spawn_reward_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..95c5106774ec738fb5b5d365b3ce82e62e592834 --- /dev/null +++ b/tests/test_spawn_reward_hooks.py @@ -0,0 +1,43 @@ +from osint_env.env.spawn_reward_hooks import critical_steps, parl_style_spawn_reward + + +def test_critical_steps_matches_parallel_path_length(): + total = critical_steps(main_steps=[1, 1, 1], parallel_subagent_steps=[[3, 2], [0], [4, 1, 2]]) + assert total == 1 + 3 + 1 + 0 + 1 + 4 + + +def test_parl_reward_prefers_finished_parallel_work(): + base = parl_style_spawn_reward( + task_outcome_reward=0.2, + spawn_count=4, + finished_subtasks=1, + critical_steps=12, + lambda_parallel=0.2, + lambda_finish=0.25, + anneal=1.0, + breadth=2, + depth=3, + ) + better = parl_style_spawn_reward( + task_outcome_reward=0.2, + spawn_count=4, + finished_subtasks=4, + critical_steps=8, + lambda_parallel=0.2, + 
lambda_finish=0.25, + anneal=1.0, + breadth=4, + depth=2, + ) + assert better > base + + +def test_parl_auxiliary_can_be_annealed_out(): + frozen = parl_style_spawn_reward( + task_outcome_reward=0.7, + spawn_count=8, + finished_subtasks=8, + critical_steps=5, + anneal=0.0, + ) + assert frozen == 0.7 diff --git a/tests/test_swarm_agent.py b/tests/test_swarm_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..712b2fbd3246ca528338f9bfb7e300b24690dc88 --- /dev/null +++ b/tests/test_swarm_agent.py @@ -0,0 +1,17 @@ +from osint_env.agents.swarm_agent import SwarmAgentRunner +from osint_env.domain.models import EnvironmentConfig, SwarmConfig +from osint_env.env.environment import OSINTEnvironment + + +def test_swarm_runner_emits_spawn_telemetry(): + config = EnvironmentConfig( + seed=14, + max_steps=8, + swarm=SwarmConfig(enabled=True, max_agents=3, max_breadth=2, max_width=2, max_depth=2, planner_rounds=2), + ) + env = OSINTEnvironment(config) + info = SwarmAgentRunner(env).run_episode() + + assert info["spawn_count"] > 0 + assert "spawn_auxiliary" in info["reward_components"] + assert info["spawn_critical_steps"] > 0 diff --git a/tests/test_tools.py b/tests/test_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..dc5a67f5f17b05b44bca34979cf852fb4a13f217 --- /dev/null +++ b/tests/test_tools.py @@ -0,0 +1,39 @@ +from osint_env.config import clone_environment_config, load_seeding_config, load_shared_config +from osint_env.data.generator import DatasetGenerator +from osint_env.domain.models import EnvironmentConfig +from osint_env.env.environment import OSINTEnvironment +from osint_env.platforms.tools import ToolRegistry + + +def test_tools_basics(): + gen = DatasetGenerator(EnvironmentConfig(n_users=12, seed=3)) + g = gen.build_canonical_graph() + views = gen.build_platform_views(g) + tools = ToolRegistry(views) + out = tools.search_posts(query="Update") + assert out["count"] > 0 + profile_any = next(iter([p["user_id"] for 
p in views.profiles if p["user_id"].startswith("user_")])) + profile = tools.get_profile(profile_any) + assert profile["found"] is True + + +def test_seeded_tools_expose_seed_question_entities(): + shared = load_shared_config("datasets/fixed_levels/shared_config_fixed_levels.json") + env_cfg = clone_environment_config(shared.environment) + env_cfg.seeding = load_seeding_config("datasets/fixed_levels/seed_fixed_levels.json") + env_cfg.llm.provider = "mock" + env = OSINTEnvironment(env_cfg) + tools = env.tools + + post = tools.get_post("post_midnight_manifest") + assert post["found"] is True + assert "loc_dockyard17" in post["result"]["references"] + + people = tools.search_people(org="org_northbridge_logistics") + user_ids = {row["user_id"] for row in people["results"]} + assert "user_bharat" in user_ids + assert "user_hiro" in user_ids + + alias_profile = tools.get_profile("alias_docksparrow") + assert alias_profile["found"] is True + assert alias_profile["result"]["user_id"] == "user_hiro" diff --git a/tests/test_training_config.py b/tests/test_training_config.py new file mode 100644 index 0000000000000000000000000000000000000000..9d55caf3b9b0c670e52051413f7923a5788ed2ba --- /dev/null +++ b/tests/test_training_config.py @@ -0,0 +1,161 @@ +from pathlib import Path +import json + +from osint_env.training.config import load_self_play_config + + +def test_self_play_config_defaults_when_missing(): + cfg = load_self_play_config("/tmp/does_not_exist_for_self_play_config.json") + assert cfg.rounds >= 1 + assert cfg.pipeline_mode in {"legacy", "swarm_v2"} + assert cfg.model_topology in {"dual", "shared"} + assert cfg.phase_schedule in {"generator_answerer", "answerer_generator_answerer"} + assert cfg.tuning_mode in {"full", "lora"} + assert cfg.generator_phase.max_steps >= 1 + assert cfg.answerer_phase.max_steps >= 1 + assert cfg.generator_reward_weights.hardness > 0.0 + assert cfg.swarm_v2.generator_swarm.shared_context is True + assert 
cfg.swarm_v2.validation.max_support_edges >= 1 + assert cfg.wandb_enabled is False + assert cfg.wandb_project == "osint-self-play" + assert cfg.canonical_graph_mode == "generate" + + +def test_self_play_config_parses_overrides(tmp_path: Path): + cfg_path = tmp_path / "self_play.json" + cfg_path.write_text( + json.dumps( + { + "rounds": 5, + "output_dir": "artifacts/custom_self_play", + "dry_run": False, + "pipeline_mode": "swarm_v2", + "wandb_enabled": True, + "wandb_project": "osint-train-tests", + "wandb_entity": "example-team", + "wandb_run_name_prefix": "ci-self-play", + "canonical_graph_mode": "fixed", + "model_topology": "shared", + "phase_schedule": "answerer_generator_answerer", + "tuning_mode": "lora", + "shared_model_name_or_path": "/models/local-base", + "seed_tasks_per_round": 12, + "generated_tasks_per_round": 18, + "swarm_v2": { + "generator_swarm": { + "shared_context": True, + "max_agents": 5, + "max_breadth": 4, + "max_depth": 3, + "planner_rounds": 3, + "tools_per_agent": 2, + }, + "answerer_swarm": { + "shared_context": True, + "max_agents": 4, + "max_breadth": 3, + "max_depth": 2, + "planner_rounds": 2, + "tools_per_agent": 2, + }, + "validation": { + "max_support_edges": 6, + "max_path_hops": 3, + "max_context_nodes": 10, + "max_context_edges": 6, + "duplicate_similarity_threshold": 0.75, + }, + "shared_context": { + "shared_by_default": True, + "max_nodes": 10, + "max_edges": 6, + "target_pressure": 0.9, + }, + }, + "generator_reward_weights": { + "validity": 0.2, + "hardness": 0.6, + "diversity": 0.1, + "consistency": 0.1, + }, + "lora": { + "r": 32, + "alpha": 64, + "dropout": 0.1, + "target_modules": ["q_proj", "v_proj"], + "bias": "none", + "task_type": "CAUSAL_LM", + }, + "generator_phase": { + "model_name_or_path": "Qwen/Qwen2.5-3B-Instruct", + "max_steps": 77, + "num_generations": 6, + "loss_type": "grpo", + "scale_rewards": "group", + "output_subdir": "gen_phase", + }, + "answerer_phase": { + "model_name_or_path": 
"Qwen/Qwen2.5-1.5B-Instruct", + "max_steps": 55, + "num_generations": 5, + "output_subdir": "ans_phase", + }, + } + ), + encoding="utf-8", + ) + + cfg = load_self_play_config(cfg_path) + assert cfg.rounds == 5 + assert cfg.output_dir == "artifacts/custom_self_play" + assert cfg.dry_run is False + assert cfg.pipeline_mode == "swarm_v2" + assert cfg.wandb_enabled is True + assert cfg.wandb_project == "osint-train-tests" + assert cfg.wandb_entity == "example-team" + assert cfg.wandb_run_name_prefix == "ci-self-play" + assert cfg.canonical_graph_mode == "fixed" + assert cfg.model_topology == "shared" + assert cfg.phase_schedule == "answerer_generator_answerer" + assert cfg.tuning_mode == "lora" + assert cfg.shared_model_name_or_path == "/models/local-base" + assert cfg.seed_tasks_per_round == 12 + assert cfg.generated_tasks_per_round == 18 + assert cfg.swarm_v2.generator_swarm.max_agents == 5 + assert cfg.swarm_v2.answerer_swarm.max_agents == 4 + assert cfg.swarm_v2.validation.max_support_edges == 6 + assert cfg.swarm_v2.shared_context.target_pressure == 0.9 + assert cfg.generator_reward_weights.hardness == 0.6 + assert cfg.lora.r == 32 + assert cfg.lora.alpha == 64 + assert cfg.lora.target_modules == ["q_proj", "v_proj"] + + assert cfg.generator_phase.model_name_or_path == "Qwen/Qwen2.5-3B-Instruct" + assert cfg.generator_phase.max_steps == 77 + assert cfg.generator_phase.num_generations == 6 + assert cfg.generator_phase.loss_type == "grpo" + assert cfg.generator_phase.scale_rewards == "group" + assert cfg.generator_phase.output_subdir == "gen_phase" + + assert cfg.answerer_phase.model_name_or_path == "Qwen/Qwen2.5-1.5B-Instruct" + assert cfg.answerer_phase.max_steps == 55 + assert cfg.answerer_phase.num_generations == 5 + assert cfg.answerer_phase.output_subdir == "ans_phase" + + +def test_self_play_config_keeps_legacy_mode_when_not_set(tmp_path: Path): + cfg_path = tmp_path / "legacy_self_play.json" + cfg_path.write_text( + json.dumps( + { + "rounds": 2, + 
"max_support_edges": 11, + } + ), + encoding="utf-8", + ) + + cfg = load_self_play_config(cfg_path) + assert cfg.pipeline_mode == "legacy" + assert cfg.max_support_edges == 11 + assert cfg.swarm_v2.validation.max_support_edges == 11 diff --git a/tests/test_validation.py b/tests/test_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..ce1e398b01983303f221ea96dc9d2a731137265d --- /dev/null +++ b/tests/test_validation.py @@ -0,0 +1,7 @@ +from osint_env.validation import run_validation_suite + + +def test_validation_suite_passes_repo_gate(): + result = run_validation_suite() + assert result["passed"] is True + assert len(result["checks"]) >= 4 diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000000000000000000000000000000000000..e697d8c795db0704eca2b78ccc5867b673f148c7 --- /dev/null +++ b/uv.lock @@ -0,0 +1,895 @@ +version = 1 +revision = 3 +requires-python = ">=3.10" +resolution-markers = [ + "python_full_version >= '3.11'", + "python_full_version < '3.11'", +] + +[[package]] +name = "annotated-doc" +version = "0.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", 
hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/14/2c5dd9f512b66549ae92767a9c7b330ae88e1932ca57876909410251fe13/anyio-4.13.0.tar.gz", hash = "sha256:334b70e641fd2221c1505b3890c69882fe4a2df910cba14d97019b90b24439dc", size = 231622, upload-time = "2026-03-24T12:59:09.671Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/42/e921fccf5015463e32a3cf6ee7f980a6ed0f395ceeaa45060b61d86486c2/anyio-4.13.0-py3-none-any.whl", hash = "sha256:08b310f9e24a9594186fd75b4f73f4a4152069e3853f1ed8bfbf58369f4ad708", size = 114353, upload-time = "2026-03-24T12:59:08.246Z" }, +] + +[[package]] +name = "certifi" +version = "2026.2.25" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/2d/7bf41579a8986e348fa033a31cdd0e4121114f6bce2457e8876010b092dd/certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7", size = 155029, upload-time = "2026-02-25T02:54:17.342Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", size = 153684, 
upload-time = "2026-02-25T02:54:15.766Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/a1/67fe25fac3c7642725500a3f6cfe5821ad557c3abb11c9d20d12c7008d3e/charset_normalizer-3.4.7.tar.gz", hash = "sha256:ae89db9e5f98a11a4bf50407d4363e7b09b31e55bc117b4f7d80aab97ba009e5", size = 144271, upload-time = "2026-04-02T09:28:39.342Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/08/0f303cb0b529e456bb116f2d50565a482694fbb94340bf56d44677e7ed03/charset_normalizer-3.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cdd68a1fb318e290a2077696b7eb7a21a49163c455979c639bf5a5dcdc46617d", size = 315182, upload-time = "2026-04-02T09:25:40.673Z" }, + { url = "https://files.pythonhosted.org/packages/24/47/b192933e94b546f1b1fe4df9cc1f84fcdbf2359f8d1081d46dd029b50207/charset_normalizer-3.4.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e17b8d5d6a8c47c85e68ca8379def1303fd360c3e22093a807cd34a71cd082b8", size = 209329, upload-time = "2026-04-02T09:25:42.354Z" }, + { url = "https://files.pythonhosted.org/packages/c2/b4/01fa81c5ca6141024d89a8fc15968002b71da7f825dd14113207113fabbd/charset_normalizer-3.4.7-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:511ef87c8aec0783e08ac18565a16d435372bc1ac25a91e6ac7f5ef2b0bff790", size = 231230, upload-time = "2026-04-02T09:25:44.281Z" }, + { url = "https://files.pythonhosted.org/packages/20/f7/7b991776844dfa058017e600e6e55ff01984a063290ca5622c0b63162f68/charset_normalizer-3.4.7-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:007d05ec7321d12a40227aae9e2bc6dca73f3cb21058999a1df9e193555a9dcc", size = 225890, upload-time = "2026-04-02T09:25:45.475Z" }, + { url = 
"https://files.pythonhosted.org/packages/20/e7/bed0024a0f4ab0c8a9c64d4445f39b30c99bd1acd228291959e3de664247/charset_normalizer-3.4.7-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cf29836da5119f3c8a8a70667b0ef5fdca3bb12f80fd06487cfa575b3909b393", size = 216930, upload-time = "2026-04-02T09:25:46.58Z" }, + { url = "https://files.pythonhosted.org/packages/e2/ab/b18f0ab31cdd7b3ddb8bb76c4a414aeb8160c9810fdf1bc62f269a539d87/charset_normalizer-3.4.7-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:12d8baf840cc7889b37c7c770f478adea7adce3dcb3944d02ec87508e2dcf153", size = 202109, upload-time = "2026-04-02T09:25:48.031Z" }, + { url = "https://files.pythonhosted.org/packages/82/e5/7e9440768a06dfb3075936490cb82dbf0ee20a133bf0dd8551fa096914ec/charset_normalizer-3.4.7-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d560742f3c0d62afaccf9f41fe485ed69bd7661a241f86a3ef0f0fb8b1a397af", size = 214684, upload-time = "2026-04-02T09:25:49.245Z" }, + { url = "https://files.pythonhosted.org/packages/71/94/8c61d8da9f062fdf457c80acfa25060ec22bf1d34bbeaca4350f13bcfd07/charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b14b2d9dac08e28bb8046a1a0434b1750eb221c8f5b87a68f4fa11a6f97b5e34", size = 212785, upload-time = "2026-04-02T09:25:50.671Z" }, + { url = "https://files.pythonhosted.org/packages/66/cd/6e9889c648e72c0ab2e5967528bb83508f354d706637bc7097190c874e13/charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:bc17a677b21b3502a21f66a8cc64f5bfad4df8a0b8434d661666f8ce90ac3af1", size = 203055, upload-time = "2026-04-02T09:25:51.802Z" }, + { url = "https://files.pythonhosted.org/packages/92/2e/7a951d6a08aefb7eb8e1b54cdfb580b1365afdd9dd484dc4bee9e5d8f258/charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:750e02e074872a3fad7f233b47734166440af3cdea0add3e95163110816d6752", size = 232502, upload-time = "2026-04-02T09:25:53.388Z" }, 
+ { url = "https://files.pythonhosted.org/packages/58/d5/abcf2d83bf8e0a1286df55cd0dc1d49af0da4282aa77e986df343e7de124/charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:4e5163c14bffd570ef2affbfdd77bba66383890797df43dc8b4cc7d6f500bf53", size = 214295, upload-time = "2026-04-02T09:25:54.765Z" }, + { url = "https://files.pythonhosted.org/packages/47/3a/7d4cd7ed54be99973a0dc176032cba5cb1f258082c31fa6df35cff46acfc/charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6ed74185b2db44f41ef35fd1617c5888e59792da9bbc9190d6c7300617182616", size = 227145, upload-time = "2026-04-02T09:25:55.904Z" }, + { url = "https://files.pythonhosted.org/packages/1d/98/3a45bf8247889cf28262ebd3d0872edff11565b2a1e3064ccb132db3fbb0/charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:94e1885b270625a9a828c9793b4d52a64445299baa1fea5a173bf1d3dd9a1a5a", size = 218884, upload-time = "2026-04-02T09:25:57.074Z" }, + { url = "https://files.pythonhosted.org/packages/ad/80/2e8b7f8915ed5c9ef13aa828d82738e33888c485b65ebf744d615040c7ea/charset_normalizer-3.4.7-cp310-cp310-win32.whl", hash = "sha256:6785f414ae0f3c733c437e0f3929197934f526d19dfaa75e18fdb4f94c6fb374", size = 148343, upload-time = "2026-04-02T09:25:58.199Z" }, + { url = "https://files.pythonhosted.org/packages/35/1b/3b8c8c77184af465ee9ad88b5aea46ea6b2e1f7b9dc9502891e37af21e30/charset_normalizer-3.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:6696b7688f54f5af4462118f0bfa7c1621eeb87154f77fa04b9295ce7a8f2943", size = 159174, upload-time = "2026-04-02T09:25:59.322Z" }, + { url = "https://files.pythonhosted.org/packages/be/c1/feb40dca40dbb21e0a908801782d9288c64fc8d8e562c2098e9994c8c21b/charset_normalizer-3.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:66671f93accb62ed07da56613636f3641f1a12c13046ce91ffc923721f23c008", size = 147805, upload-time = "2026-04-02T09:26:00.756Z" }, + { url = 
"https://files.pythonhosted.org/packages/c2/d7/b5b7020a0565c2e9fa8c09f4b5fa6232feb326b8c20081ccded47ea368fd/charset_normalizer-3.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7641bb8895e77f921102f72833904dcd9901df5d6d72a2ab8f31d04b7e51e4e7", size = 309705, upload-time = "2026-04-02T09:26:02.191Z" }, + { url = "https://files.pythonhosted.org/packages/5a/53/58c29116c340e5456724ecd2fff4196d236b98f3da97b404bc5e51ac3493/charset_normalizer-3.4.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:202389074300232baeb53ae2569a60901f7efadd4245cf3a3bf0617d60b439d7", size = 206419, upload-time = "2026-04-02T09:26:03.583Z" }, + { url = "https://files.pythonhosted.org/packages/b2/02/e8146dc6591a37a00e5144c63f29fb7c97a734ea8a111190783c0e60ab63/charset_normalizer-3.4.7-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:30b8d1d8c52a48c2c5690e152c169b673487a2a58de1ec7393196753063fcd5e", size = 227901, upload-time = "2026-04-02T09:26:04.738Z" }, + { url = "https://files.pythonhosted.org/packages/fb/73/77486c4cd58f1267bf17db420e930c9afa1b3be3fe8c8b8ebbebc9624359/charset_normalizer-3.4.7-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:532bc9bf33a68613fd7d65e4b1c71a6a38d7d42604ecf239c77392e9b4e8998c", size = 222742, upload-time = "2026-04-02T09:26:06.36Z" }, + { url = "https://files.pythonhosted.org/packages/a1/fa/f74eb381a7d94ded44739e9d94de18dc5edc9c17fb8c11f0a6890696c0a9/charset_normalizer-3.4.7-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2fe249cb4651fd12605b7288b24751d8bfd46d35f12a20b1ba33dea122e690df", size = 214061, upload-time = "2026-04-02T09:26:08.347Z" }, + { url = "https://files.pythonhosted.org/packages/dc/92/42bd3cefcf7687253fb86694b45f37b733c97f59af3724f356fa92b8c344/charset_normalizer-3.4.7-cp311-cp311-manylinux_2_31_armv7l.whl", hash = 
"sha256:65bcd23054beab4d166035cabbc868a09c1a49d1efe458fe8e4361215df40265", size = 199239, upload-time = "2026-04-02T09:26:09.823Z" }, + { url = "https://files.pythonhosted.org/packages/4c/3d/069e7184e2aa3b3cddc700e3dd267413dc259854adc3380421c805c6a17d/charset_normalizer-3.4.7-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:08e721811161356f97b4059a9ba7bafb23ea5ee2255402c42881c214e173c6b4", size = 210173, upload-time = "2026-04-02T09:26:10.953Z" }, + { url = "https://files.pythonhosted.org/packages/62/51/9d56feb5f2e7074c46f93e0ebdbe61f0848ee246e2f0d89f8e20b89ebb8f/charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e060d01aec0a910bdccb8be71faf34e7799ce36950f8294c8bf612cba65a2c9e", size = 209841, upload-time = "2026-04-02T09:26:12.142Z" }, + { url = "https://files.pythonhosted.org/packages/d2/59/893d8f99cc4c837dda1fe2f1139079703deb9f321aabcb032355de13b6c7/charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:38c0109396c4cfc574d502df99742a45c72c08eff0a36158b6f04000043dbf38", size = 200304, upload-time = "2026-04-02T09:26:13.711Z" }, + { url = "https://files.pythonhosted.org/packages/7d/1d/ee6f3be3464247578d1ed5c46de545ccc3d3ff933695395c402c21fa6b77/charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1c2a768fdd44ee4a9339a9b0b130049139b8ce3c01d2ce09f67f5a68048d477c", size = 229455, upload-time = "2026-04-02T09:26:14.941Z" }, + { url = "https://files.pythonhosted.org/packages/54/bb/8fb0a946296ea96a488928bdce8ef99023998c48e4713af533e9bb98ef07/charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:1a87ca9d5df6fe460483d9a5bbf2b18f620cbed41b432e2bddb686228282d10b", size = 210036, upload-time = "2026-04-02T09:26:16.478Z" }, + { url = "https://files.pythonhosted.org/packages/9a/bc/015b2387f913749f82afd4fcba07846d05b6d784dd16123cb66860e0237d/charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:d635aab80466bc95771bb78d5370e74d36d1fe31467b6b29b8b57b2a3cd7d22c", size = 224739, upload-time = "2026-04-02T09:26:17.751Z" }, + { url = "https://files.pythonhosted.org/packages/17/ab/63133691f56baae417493cba6b7c641571a2130eb7bceba6773367ab9ec5/charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ae196f021b5e7c78e918242d217db021ed2a6ace2bc6ae94c0fc596221c7f58d", size = 216277, upload-time = "2026-04-02T09:26:18.981Z" }, + { url = "https://files.pythonhosted.org/packages/06/6d/3be70e827977f20db77c12a97e6a9f973631a45b8d186c084527e53e77a4/charset_normalizer-3.4.7-cp311-cp311-win32.whl", hash = "sha256:adb2597b428735679446b46c8badf467b4ca5f5056aae4d51a19f9570301b1ad", size = 147819, upload-time = "2026-04-02T09:26:20.295Z" }, + { url = "https://files.pythonhosted.org/packages/20/d9/5f67790f06b735d7c7637171bbfd89882ad67201891b7275e51116ed8207/charset_normalizer-3.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:8e385e4267ab76874ae30db04c627faaaf0b509e1ccc11a95b3fc3e83f855c00", size = 159281, upload-time = "2026-04-02T09:26:21.74Z" }, + { url = "https://files.pythonhosted.org/packages/ca/83/6413f36c5a34afead88ce6f66684d943d91f233d76dd083798f9602b75ae/charset_normalizer-3.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:d4a48e5b3c2a489fae013b7589308a40146ee081f6f509e047e0e096084ceca1", size = 147843, upload-time = "2026-04-02T09:26:22.901Z" }, + { url = "https://files.pythonhosted.org/packages/0c/eb/4fc8d0a7110eb5fc9cc161723a34a8a6c200ce3b4fbf681bc86feee22308/charset_normalizer-3.4.7-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:eca9705049ad3c7345d574e3510665cb2cf844c2f2dcfe675332677f081cbd46", size = 311328, upload-time = "2026-04-02T09:26:24.331Z" }, + { url = "https://files.pythonhosted.org/packages/f8/e3/0fadc706008ac9d7b9b5be6dc767c05f9d3e5df51744ce4cc9605de7b9f4/charset_normalizer-3.4.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:6178f72c5508bfc5fd446a5905e698c6212932f25bcdd4b47a757a50605a90e2", size = 208061, upload-time = "2026-04-02T09:26:25.568Z" }, + { url = "https://files.pythonhosted.org/packages/42/f0/3dd1045c47f4a4604df85ec18ad093912ae1344ac706993aff91d38773a2/charset_normalizer-3.4.7-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e1421b502d83040e6d7fb2fb18dff63957f720da3d77b2fbd3187ceb63755d7b", size = 229031, upload-time = "2026-04-02T09:26:26.865Z" }, + { url = "https://files.pythonhosted.org/packages/dc/67/675a46eb016118a2fbde5a277a5d15f4f69d5f3f5f338e5ee2f8948fcf43/charset_normalizer-3.4.7-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:edac0f1ab77644605be2cbba52e6b7f630731fc42b34cb0f634be1a6eface56a", size = 225239, upload-time = "2026-04-02T09:26:28.044Z" }, + { url = "https://files.pythonhosted.org/packages/4b/f8/d0118a2f5f23b02cd166fa385c60f9b0d4f9194f574e2b31cef350ad7223/charset_normalizer-3.4.7-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5649fd1c7bade02f320a462fdefd0b4bd3ce036065836d4f42e0de958038e116", size = 216589, upload-time = "2026-04-02T09:26:29.239Z" }, + { url = "https://files.pythonhosted.org/packages/b1/f1/6d2b0b261b6c4ceef0fcb0d17a01cc5bc53586c2d4796fa04b5c540bc13d/charset_normalizer-3.4.7-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:203104ed3e428044fd943bc4bf45fa73c0730391f9621e37fe39ecf477b128cb", size = 202733, upload-time = "2026-04-02T09:26:30.5Z" }, + { url = "https://files.pythonhosted.org/packages/6f/c0/7b1f943f7e87cc3db9626ba17807d042c38645f0a1d4415c7a14afb5591f/charset_normalizer-3.4.7-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:298930cec56029e05497a76988377cbd7457ba864beeea92ad7e844fe74cd1f1", size = 212652, upload-time = "2026-04-02T09:26:31.709Z" }, + { url = 
"https://files.pythonhosted.org/packages/38/dd/5a9ab159fe45c6e72079398f277b7d2b523e7f716acc489726115a910097/charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:708838739abf24b2ceb208d0e22403dd018faeef86ddac04319a62ae884c4f15", size = 211229, upload-time = "2026-04-02T09:26:33.282Z" }, + { url = "https://files.pythonhosted.org/packages/d5/ff/531a1cad5ca855d1c1a8b69cb71abfd6d85c0291580146fda7c82857caa1/charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:0f7eb884681e3938906ed0434f20c63046eacd0111c4ba96f27b76084cd679f5", size = 203552, upload-time = "2026-04-02T09:26:34.845Z" }, + { url = "https://files.pythonhosted.org/packages/c1/4c/a5fb52d528a8ca41f7598cb619409ece30a169fbdf9cdce592e53b46c3a6/charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4dc1e73c36828f982bfe79fadf5919923f8a6f4df2860804db9a98c48824ce8d", size = 230806, upload-time = "2026-04-02T09:26:36.152Z" }, + { url = "https://files.pythonhosted.org/packages/59/7a/071feed8124111a32b316b33ae4de83d36923039ef8cf48120266844285b/charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:aed52fea0513bac0ccde438c188c8a471c4e0f457c2dd20cdbf6ea7a450046c7", size = 212316, upload-time = "2026-04-02T09:26:37.672Z" }, + { url = "https://files.pythonhosted.org/packages/fd/35/f7dba3994312d7ba508e041eaac39a36b120f32d4c8662b8814dab876431/charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fea24543955a6a729c45a73fe90e08c743f0b3334bbf3201e6c4bc1b0c7fa464", size = 227274, upload-time = "2026-04-02T09:26:38.93Z" }, + { url = "https://files.pythonhosted.org/packages/8a/2d/a572df5c9204ab7688ec1edc895a73ebded3b023bb07364710b05dd1c9be/charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bb6d88045545b26da47aa879dd4a89a71d1dce0f0e549b1abcb31dfe4a8eac49", size = 218468, upload-time = "2026-04-02T09:26:40.17Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/eb/890922a8b03a568ca2f336c36585a4713c55d4d67bf0f0c78924be6315ca/charset_normalizer-3.4.7-cp312-cp312-win32.whl", hash = "sha256:2257141f39fe65a3fdf38aeccae4b953e5f3b3324f4ff0daf9f15b8518666a2c", size = 148460, upload-time = "2026-04-02T09:26:41.416Z" }, + { url = "https://files.pythonhosted.org/packages/35/d9/0e7dffa06c5ab081f75b1b786f0aefc88365825dfcd0ac544bdb7b2b6853/charset_normalizer-3.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:5ed6ab538499c8644b8a3e18debabcd7ce684f3fa91cf867521a7a0279cab2d6", size = 159330, upload-time = "2026-04-02T09:26:42.554Z" }, + { url = "https://files.pythonhosted.org/packages/9e/5d/481bcc2a7c88ea6b0878c299547843b2521ccbc40980cb406267088bc701/charset_normalizer-3.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:56be790f86bfb2c98fb742ce566dfb4816e5a83384616ab59c49e0604d49c51d", size = 147828, upload-time = "2026-04-02T09:26:44.075Z" }, + { url = "https://files.pythonhosted.org/packages/c1/3b/66777e39d3ae1ddc77ee606be4ec6d8cbd4c801f65e5a1b6f2b11b8346dd/charset_normalizer-3.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f496c9c3cc02230093d8330875c4c3cdfc3b73612a5fd921c65d39cbcef08063", size = 309627, upload-time = "2026-04-02T09:26:45.198Z" }, + { url = "https://files.pythonhosted.org/packages/2e/4e/b7f84e617b4854ade48a1b7915c8ccfadeba444d2a18c291f696e37f0d3b/charset_normalizer-3.4.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ea948db76d31190bf08bd371623927ee1339d5f2a0b4b1b4a4439a65298703c", size = 207008, upload-time = "2026-04-02T09:26:46.824Z" }, + { url = "https://files.pythonhosted.org/packages/c4/bb/ec73c0257c9e11b268f018f068f5d00aa0ef8c8b09f7753ebd5f2880e248/charset_normalizer-3.4.7-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a277ab8928b9f299723bc1a2dabb1265911b1a76341f90a510368ca44ad9ab66", size = 228303, upload-time = "2026-04-02T09:26:48.397Z" }, + { url = 
"https://files.pythonhosted.org/packages/85/fb/32d1f5033484494619f701e719429c69b766bfc4dbc61aa9e9c8c166528b/charset_normalizer-3.4.7-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3bec022aec2c514d9cf199522a802bd007cd588ab17ab2525f20f9c34d067c18", size = 224282, upload-time = "2026-04-02T09:26:49.684Z" }, + { url = "https://files.pythonhosted.org/packages/fa/07/330e3a0dda4c404d6da83b327270906e9654a24f6c546dc886a0eb0ffb23/charset_normalizer-3.4.7-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e044c39e41b92c845bc815e5ae4230804e8e7bc29e399b0437d64222d92809dd", size = 215595, upload-time = "2026-04-02T09:26:50.915Z" }, + { url = "https://files.pythonhosted.org/packages/e3/7c/fc890655786e423f02556e0216d4b8c6bcb6bdfa890160dc66bf52dee468/charset_normalizer-3.4.7-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:f495a1652cf3fbab2eb0639776dad966c2fb874d79d87ca07f9d5f059b8bd215", size = 201986, upload-time = "2026-04-02T09:26:52.197Z" }, + { url = "https://files.pythonhosted.org/packages/d8/97/bfb18b3db2aed3b90cf54dc292ad79fdd5ad65c4eae454099475cbeadd0d/charset_normalizer-3.4.7-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e712b419df8ba5e42b226c510472b37bd57b38e897d3eca5e8cfd410a29fa859", size = 211711, upload-time = "2026-04-02T09:26:53.49Z" }, + { url = "https://files.pythonhosted.org/packages/6f/a5/a581c13798546a7fd557c82614a5c65a13df2157e9ad6373166d2a3e645d/charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7804338df6fcc08105c7745f1502ba68d900f45fd770d5bdd5288ddccb8a42d8", size = 210036, upload-time = "2026-04-02T09:26:54.975Z" }, + { url = "https://files.pythonhosted.org/packages/8c/bf/b3ab5bcb478e4193d517644b0fb2bf5497fbceeaa7a1bc0f4d5b50953861/charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:481551899c856c704d58119b5025793fa6730adda3571971af568f66d2424bb5", size = 202998, 
upload-time = "2026-04-02T09:26:56.303Z" }, + { url = "https://files.pythonhosted.org/packages/e7/4e/23efd79b65d314fa320ec6017b4b5834d5c12a58ba4610aa353af2e2f577/charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f59099f9b66f0d7145115e6f80dd8b1d847176df89b234a5a6b3f00437aa0832", size = 230056, upload-time = "2026-04-02T09:26:57.554Z" }, + { url = "https://files.pythonhosted.org/packages/b9/9f/1e1941bc3f0e01df116e68dc37a55c4d249df5e6fa77f008841aef68264f/charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:f59ad4c0e8f6bba240a9bb85504faa1ab438237199d4cce5f622761507b8f6a6", size = 211537, upload-time = "2026-04-02T09:26:58.843Z" }, + { url = "https://files.pythonhosted.org/packages/80/0f/088cbb3020d44428964a6c97fe1edfb1b9550396bf6d278330281e8b709c/charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:3dedcc22d73ec993f42055eff4fcfed9318d1eeb9a6606c55892a26964964e48", size = 226176, upload-time = "2026-04-02T09:27:00.437Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9f/130394f9bbe06f4f63e22641d32fc9b202b7e251c9aef4db044324dac493/charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:64f02c6841d7d83f832cd97ccf8eb8a906d06eb95d5276069175c696b024b60a", size = 217723, upload-time = "2026-04-02T09:27:02.021Z" }, + { url = "https://files.pythonhosted.org/packages/73/55/c469897448a06e49f8fa03f6caae97074fde823f432a98f979cc42b90e69/charset_normalizer-3.4.7-cp313-cp313-win32.whl", hash = "sha256:4042d5c8f957e15221d423ba781e85d553722fc4113f523f2feb7b188cc34c5e", size = 148085, upload-time = "2026-04-02T09:27:03.192Z" }, + { url = "https://files.pythonhosted.org/packages/5d/78/1b74c5bbb3f99b77a1715c91b3e0b5bdb6fe302d95ace4f5b1bec37b0167/charset_normalizer-3.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:3946fa46a0cf3e4c8cb1cc52f56bb536310d34f25f01ca9b6c16afa767dab110", size = 158819, upload-time = "2026-04-02T09:27:04.454Z" }, + { url = 
"https://files.pythonhosted.org/packages/68/86/46bd42279d323deb8687c4a5a811fd548cb7d1de10cf6535d099877a9a9f/charset_normalizer-3.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:80d04837f55fc81da168b98de4f4b797ef007fc8a79ab71c6ec9bc4dd662b15b", size = 147915, upload-time = "2026-04-02T09:27:05.971Z" }, + { url = "https://files.pythonhosted.org/packages/97/c8/c67cb8c70e19ef1960b97b22ed2a1567711de46c4ddf19799923adc836c2/charset_normalizer-3.4.7-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:c36c333c39be2dbca264d7803333c896ab8fa7d4d6f0ab7edb7dfd7aea6e98c0", size = 309234, upload-time = "2026-04-02T09:27:07.194Z" }, + { url = "https://files.pythonhosted.org/packages/99/85/c091fdee33f20de70d6c8b522743b6f831a2f1cd3ff86de4c6a827c48a76/charset_normalizer-3.4.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1c2aed2e5e41f24ea8ef1590b8e848a79b56f3a5564a65ceec43c9d692dc7d8a", size = 208042, upload-time = "2026-04-02T09:27:08.749Z" }, + { url = "https://files.pythonhosted.org/packages/87/1c/ab2ce611b984d2fd5d86a5a8a19c1ae26acac6bad967da4967562c75114d/charset_normalizer-3.4.7-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:54523e136b8948060c0fa0bc7b1b50c32c186f2fceee897a495406bb6e311d2b", size = 228706, upload-time = "2026-04-02T09:27:09.951Z" }, + { url = "https://files.pythonhosted.org/packages/a8/29/2b1d2cb00bf085f59d29eb773ce58ec2d325430f8c216804a0a5cd83cbca/charset_normalizer-3.4.7-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:715479b9a2802ecac752a3b0efa2b0b60285cf962ee38414211abdfccc233b41", size = 224727, upload-time = "2026-04-02T09:27:11.175Z" }, + { url = "https://files.pythonhosted.org/packages/47/5c/032c2d5a07fe4d4855fea851209cca2b6f03ebeb6d4e3afdb3358386a684/charset_normalizer-3.4.7-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:bd6c2a1c7573c64738d716488d2cdd3c00e340e4835707d8fdb8dc1a66ef164e", size = 215882, upload-time = "2026-04-02T09:27:12.446Z" }, + { url = "https://files.pythonhosted.org/packages/2c/c2/356065d5a8b78ed04499cae5f339f091946a6a74f91e03476c33f0ab7100/charset_normalizer-3.4.7-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:c45e9440fb78f8ddabcf714b68f936737a121355bf59f3907f4e17721b9d1aae", size = 200860, upload-time = "2026-04-02T09:27:13.721Z" }, + { url = "https://files.pythonhosted.org/packages/0c/cd/a32a84217ced5039f53b29f460962abb2d4420def55afabe45b1c3c7483d/charset_normalizer-3.4.7-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:3534e7dcbdcf757da6b85a0bbf5b6868786d5982dd959b065e65481644817a18", size = 211564, upload-time = "2026-04-02T09:27:15.272Z" }, + { url = "https://files.pythonhosted.org/packages/44/86/58e6f13ce26cc3b8f4a36b94a0f22ae2f00a72534520f4ae6857c4b81f89/charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:e8ac484bf18ce6975760921bb6148041faa8fef0547200386ea0b52b5d27bf7b", size = 211276, upload-time = "2026-04-02T09:27:16.834Z" }, + { url = "https://files.pythonhosted.org/packages/8f/fe/d17c32dc72e17e155e06883efa84514ca375f8a528ba2546bee73fc4df81/charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a5fe03b42827c13cdccd08e6c0247b6a6d4b5e3cdc53fd1749f5896adcdc2356", size = 201238, upload-time = "2026-04-02T09:27:18.229Z" }, + { url = "https://files.pythonhosted.org/packages/6a/29/f33daa50b06525a237451cdb6c69da366c381a3dadcd833fa5676bc468b3/charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:2d6eb928e13016cea4f1f21d1e10c1cebd5a421bc57ddf5b1142ae3f86824fab", size = 230189, upload-time = "2026-04-02T09:27:19.445Z" }, + { url = "https://files.pythonhosted.org/packages/b6/6e/52c84015394a6a0bdcd435210a7e944c5f94ea1055f5cc5d56c5fe368e7b/charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_riscv64.whl", hash = 
"sha256:e74327fb75de8986940def6e8dee4f127cc9752bee7355bb323cc5b2659b6d46", size = 211352, upload-time = "2026-04-02T09:27:20.79Z" }, + { url = "https://files.pythonhosted.org/packages/8c/d7/4353be581b373033fb9198bf1da3cf8f09c1082561e8e922aa7b39bf9fe8/charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:d6038d37043bced98a66e68d3aa2b6a35505dc01328cd65217cefe82f25def44", size = 227024, upload-time = "2026-04-02T09:27:22.063Z" }, + { url = "https://files.pythonhosted.org/packages/30/45/99d18aa925bd1740098ccd3060e238e21115fffbfdcb8f3ece837d0ace6c/charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7579e913a5339fb8fa133f6bbcfd8e6749696206cf05acdbdca71a1b436d8e72", size = 217869, upload-time = "2026-04-02T09:27:23.486Z" }, + { url = "https://files.pythonhosted.org/packages/5c/05/5ee478aa53f4bb7996482153d4bfe1b89e0f087f0ab6b294fcf92d595873/charset_normalizer-3.4.7-cp314-cp314-win32.whl", hash = "sha256:5b77459df20e08151cd6f8b9ef8ef1f961ef73d85c21a555c7eed5b79410ec10", size = 148541, upload-time = "2026-04-02T09:27:25.146Z" }, + { url = "https://files.pythonhosted.org/packages/48/77/72dcb0921b2ce86420b2d79d454c7022bf5be40202a2a07906b9f2a35c97/charset_normalizer-3.4.7-cp314-cp314-win_amd64.whl", hash = "sha256:92a0a01ead5e668468e952e4238cccd7c537364eb7d851ab144ab6627dbbe12f", size = 159634, upload-time = "2026-04-02T09:27:26.642Z" }, + { url = "https://files.pythonhosted.org/packages/c6/a3/c2369911cd72f02386e4e340770f6e158c7980267da16af8f668217abaa0/charset_normalizer-3.4.7-cp314-cp314-win_arm64.whl", hash = "sha256:67f6279d125ca0046a7fd386d01b311c6363844deac3e5b069b514ba3e63c246", size = 148384, upload-time = "2026-04-02T09:27:28.271Z" }, + { url = "https://files.pythonhosted.org/packages/94/09/7e8a7f73d24dba1f0035fbbf014d2c36828fc1bf9c88f84093e57d315935/charset_normalizer-3.4.7-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:effc3f449787117233702311a1b7d8f59cba9ced946ba727bdc329ec69028e24", size = 330133, 
upload-time = "2026-04-02T09:27:29.474Z" }, + { url = "https://files.pythonhosted.org/packages/8d/da/96975ddb11f8e977f706f45cddd8540fd8242f71ecdb5d18a80723dcf62c/charset_normalizer-3.4.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fbccdc05410c9ee21bbf16a35f4c1d16123dcdeb8a1d38f33654fa21d0234f79", size = 216257, upload-time = "2026-04-02T09:27:30.793Z" }, + { url = "https://files.pythonhosted.org/packages/e5/e8/1d63bf8ef2d388e95c64b2098f45f84758f6d102a087552da1485912637b/charset_normalizer-3.4.7-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:733784b6d6def852c814bce5f318d25da2ee65dd4839a0718641c696e09a2960", size = 234851, upload-time = "2026-04-02T09:27:32.44Z" }, + { url = "https://files.pythonhosted.org/packages/9b/40/e5ff04233e70da2681fa43969ad6f66ca5611d7e669be0246c4c7aaf6dc8/charset_normalizer-3.4.7-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a89c23ef8d2c6b27fd200a42aa4ac72786e7c60d40efdc76e6011260b6e949c4", size = 233393, upload-time = "2026-04-02T09:27:34.03Z" }, + { url = "https://files.pythonhosted.org/packages/be/c1/06c6c49d5a5450f76899992f1ee40b41d076aee9279b49cf9974d2f313d5/charset_normalizer-3.4.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6c114670c45346afedc0d947faf3c7f701051d2518b943679c8ff88befe14f8e", size = 223251, upload-time = "2026-04-02T09:27:35.369Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9f/f2ff16fb050946169e3e1f82134d107e5d4ae72647ec8a1b1446c148480f/charset_normalizer-3.4.7-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:a180c5e59792af262bf263b21a3c49353f25945d8d9f70628e73de370d55e1e1", size = 206609, upload-time = "2026-04-02T09:27:36.661Z" }, + { url = 
"https://files.pythonhosted.org/packages/69/d5/a527c0cd8d64d2eab7459784fb4169a0ac76e5a6fc5237337982fd61347e/charset_normalizer-3.4.7-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:3c9a494bc5ec77d43cea229c4f6db1e4d8fe7e1bbffa8b6f0f0032430ff8ab44", size = 220014, upload-time = "2026-04-02T09:27:38.019Z" }, + { url = "https://files.pythonhosted.org/packages/7e/80/8a7b8104a3e203074dc9aa2c613d4b726c0e136bad1cc734594b02867972/charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8d828b6667a32a728a1ad1d93957cdf37489c57b97ae6c4de2860fa749b8fc1e", size = 218979, upload-time = "2026-04-02T09:27:39.37Z" }, + { url = "https://files.pythonhosted.org/packages/02/9a/b759b503d507f375b2b5c153e4d2ee0a75aa215b7f2489cf314f4541f2c0/charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:cf1493cd8607bec4d8a7b9b004e699fcf8f9103a9284cc94962cb73d20f9d4a3", size = 209238, upload-time = "2026-04-02T09:27:40.722Z" }, + { url = "https://files.pythonhosted.org/packages/c2/4e/0f3f5d47b86bdb79256e7290b26ac847a2832d9a4033f7eb2cd4bcf4bb5b/charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:0c96c3b819b5c3e9e165495db84d41914d6894d55181d2d108cc1a69bfc9cce0", size = 236110, upload-time = "2026-04-02T09:27:42.33Z" }, + { url = "https://files.pythonhosted.org/packages/96/23/bce28734eb3ed2c91dcf93abeb8a5cf393a7b2749725030bb630e554fdd8/charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:752a45dc4a6934060b3b0dab47e04edc3326575f82be64bc4fc293914566503e", size = 219824, upload-time = "2026-04-02T09:27:43.924Z" }, + { url = "https://files.pythonhosted.org/packages/2c/6f/6e897c6984cc4d41af319b077f2f600fc8214eb2fe2d6bcb79141b882400/charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:8778f0c7a52e56f75d12dae53ae320fae900a8b9b4164b981b9c5ce059cd1fcb", size = 233103, upload-time = "2026-04-02T09:27:45.348Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/22/ef7bd0fe480a0ae9b656189ec00744b60933f68b4f42a7bb06589f6f576a/charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ce3412fbe1e31eb81ea42f4169ed94861c56e643189e1e75f0041f3fe7020abe", size = 225194, upload-time = "2026-04-02T09:27:46.706Z" }, + { url = "https://files.pythonhosted.org/packages/c5/a7/0e0ab3e0b5bc1219bd80a6a0d4d72ca74d9250cb2382b7c699c147e06017/charset_normalizer-3.4.7-cp314-cp314t-win32.whl", hash = "sha256:c03a41a8784091e67a39648f70c5f97b5b6a37f216896d44d2cdcb82615339a0", size = 159827, upload-time = "2026-04-02T09:27:48.053Z" }, + { url = "https://files.pythonhosted.org/packages/7a/1d/29d32e0fb40864b1f878c7f5a0b343ae676c6e2b271a2d55cc3a152391da/charset_normalizer-3.4.7-cp314-cp314t-win_amd64.whl", hash = "sha256:03853ed82eeebbce3c2abfdbc98c96dc205f32a79627688ac9a27370ea61a49c", size = 174168, upload-time = "2026-04-02T09:27:49.795Z" }, + { url = "https://files.pythonhosted.org/packages/de/32/d92444ad05c7a6e41fb2036749777c163baf7a0301a040cb672d6b2b1ae9/charset_normalizer-3.4.7-cp314-cp314t-win_arm64.whl", hash = "sha256:c35abb8bfff0185efac5878da64c45dafd2b37fb0383add1be155a763c1f083d", size = 153018, upload-time = "2026-04-02T09:27:51.116Z" }, + { url = "https://files.pythonhosted.org/packages/db/8f/61959034484a4a7c527811f4721e75d02d653a35afb0b6054474d8185d4c/charset_normalizer-3.4.7-py3-none-any.whl", hash = "sha256:3dce51d0f5e7951f8bb4900c257dad282f49190fdbebecd4ba99bcc41fef404d", size = 61958, upload-time = "2026-04-02T09:28:37.794Z" }, +] + +[[package]] +name = "click" +version = "8.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = 
"2025-11-15T20:45:42.706Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "distro" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, +] + +[[package]] +name = "fastapi" +version = "0.135.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-doc" }, + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/e6/7adb4c5fa231e82c35b8f5741a9f2d055f520c29af5546fd70d3e8e1cd2e/fastapi-0.135.3.tar.gz", hash = "sha256:bd6d7caf1a2bdd8d676843cdcd2287729572a1ef524fc4d65c17ae002a1be654", size = 396524, upload-time = "2026-04-01T16:23:58.188Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/a4/5caa2de7f917a04ada20018eccf60d6cc6145b0199d55ca3711b0fc08312/fastapi-0.135.3-py3-none-any.whl", hash = "sha256:9b0f590c813acd13d0ab43dd8494138eb58e484bfac405db1f3187cfc5810d98", size = 117734, upload-time = "2026-04-01T16:23:59.328Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "jiter" +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/5e/4ec91646aee381d01cdb9974e30882c9cd3b8c5d1079d6b5ff4af522439a/jiter-0.13.0.tar.gz", hash = "sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4", size = 164847, upload-time = "2026-02-02T12:37:56.441Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/5a/41da76c5ea07bec1b0472b6b2fdb1b651074d504b19374d7e130e0cdfb25/jiter-0.13.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2ffc63785fd6c7977defe49b9824ae6ce2b2e2b77ce539bdaf006c26da06342e", size = 311164, upload-time = "2026-02-02T12:35:17.688Z" }, + { url = 
"https://files.pythonhosted.org/packages/40/cb/4a1bf994a3e869f0d39d10e11efb471b76d0ad70ecbfb591427a46c880c2/jiter-0.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a638816427006c1e3f0013eb66d391d7a3acda99a7b0cf091eff4497ccea33a", size = 320296, upload-time = "2026-02-02T12:35:19.828Z" }, + { url = "https://files.pythonhosted.org/packages/09/82/acd71ca9b50ecebadc3979c541cd717cce2fe2bc86236f4fa597565d8f1a/jiter-0.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19928b5d1ce0ff8c1ee1b9bdef3b5bfc19e8304f1b904e436caf30bc15dc6cf5", size = 352742, upload-time = "2026-02-02T12:35:21.258Z" }, + { url = "https://files.pythonhosted.org/packages/71/03/d1fc996f3aecfd42eb70922edecfb6dd26421c874503e241153ad41df94f/jiter-0.13.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:309549b778b949d731a2f0e1594a3f805716be704a73bf3ad9a807eed5eb5721", size = 363145, upload-time = "2026-02-02T12:35:24.653Z" }, + { url = "https://files.pythonhosted.org/packages/f1/61/a30492366378cc7a93088858f8991acd7d959759fe6138c12a4644e58e81/jiter-0.13.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcdabaea26cb04e25df3103ce47f97466627999260290349a88c8136ecae0060", size = 487683, upload-time = "2026-02-02T12:35:26.162Z" }, + { url = "https://files.pythonhosted.org/packages/20/4e/4223cffa9dbbbc96ed821c5aeb6bca510848c72c02086d1ed3f1da3d58a7/jiter-0.13.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a3a377af27b236abbf665a69b2bdd680e3b5a0bd2af825cd3b81245279a7606c", size = 373579, upload-time = "2026-02-02T12:35:27.582Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c9/b0489a01329ab07a83812d9ebcffe7820a38163c6d9e7da644f926ff877c/jiter-0.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe49d3ff6db74321f144dff9addd4a5874d3105ac5ba7c5b77fac099cfae31ae", size = 362904, upload-time = "2026-02-02T12:35:28.925Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/af/53e561352a44afcba9a9bc67ee1d320b05a370aed8df54eafe714c4e454d/jiter-0.13.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2113c17c9a67071b0f820733c0893ed1d467b5fcf4414068169e5c2cabddb1e2", size = 392380, upload-time = "2026-02-02T12:35:30.385Z" }, + { url = "https://files.pythonhosted.org/packages/76/2a/dd805c3afb8ed5b326c5ae49e725d1b1255b9754b1b77dbecdc621b20773/jiter-0.13.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ab1185ca5c8b9491b55ebf6c1e8866b8f68258612899693e24a92c5fdb9455d5", size = 517939, upload-time = "2026-02-02T12:35:31.865Z" }, + { url = "https://files.pythonhosted.org/packages/20/2a/7b67d76f55b8fe14c937e7640389612f05f9a4145fc28ae128aaa5e62257/jiter-0.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9621ca242547edc16400981ca3231e0c91c0c4c1ab8573a596cd9bb3575d5c2b", size = 551696, upload-time = "2026-02-02T12:35:33.306Z" }, + { url = "https://files.pythonhosted.org/packages/85/9c/57cdd64dac8f4c6ab8f994fe0eb04dc9fd1db102856a4458fcf8a99dfa62/jiter-0.13.0-cp310-cp310-win32.whl", hash = "sha256:a7637d92b1c9d7a771e8c56f445c7f84396d48f2e756e5978840ecba2fac0894", size = 204592, upload-time = "2026-02-02T12:35:34.58Z" }, + { url = "https://files.pythonhosted.org/packages/a7/38/f4f3ea5788b8a5bae7510a678cdc747eda0c45ffe534f9878ff37e7cf3b3/jiter-0.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c1b609e5cbd2f52bb74fb721515745b407df26d7b800458bd97cb3b972c29e7d", size = 206016, upload-time = "2026-02-02T12:35:36.435Z" }, + { url = "https://files.pythonhosted.org/packages/71/29/499f8c9eaa8a16751b1c0e45e6f5f1761d180da873d417996cc7bddc8eef/jiter-0.13.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ea026e70a9a28ebbdddcbcf0f1323128a8db66898a06eaad3a4e62d2f554d096", size = 311157, upload-time = "2026-02-02T12:35:37.758Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/f6/566364c777d2ab450b92100bea11333c64c38d32caf8dc378b48e5b20c46/jiter-0.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66aa3e663840152d18cc8ff1e4faad3dd181373491b9cfdc6004b92198d67911", size = 319729, upload-time = "2026-02-02T12:35:39.246Z" }, + { url = "https://files.pythonhosted.org/packages/73/dd/560f13ec5e4f116d8ad2658781646cca91b617ae3b8758d4a5076b278f70/jiter-0.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3524798e70655ff19aec58c7d05adb1f074fecff62da857ea9be2b908b6d701", size = 354766, upload-time = "2026-02-02T12:35:40.662Z" }, + { url = "https://files.pythonhosted.org/packages/7c/0d/061faffcfe94608cbc28a0d42a77a74222bdf5055ccdbe5fd2292b94f510/jiter-0.13.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec7e287d7fbd02cb6e22f9a00dd9c9cd504c40a61f2c61e7e1f9690a82726b4c", size = 362587, upload-time = "2026-02-02T12:35:42.025Z" }, + { url = "https://files.pythonhosted.org/packages/92/c9/c66a7864982fd38a9773ec6e932e0398d1262677b8c60faecd02ffb67bf3/jiter-0.13.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47455245307e4debf2ce6c6e65a717550a0244231240dcf3b8f7d64e4c2f22f4", size = 487537, upload-time = "2026-02-02T12:35:43.459Z" }, + { url = "https://files.pythonhosted.org/packages/6c/86/84eb4352cd3668f16d1a88929b5888a3fe0418ea8c1dfc2ad4e7bf6e069a/jiter-0.13.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ee9da221dca6e0429c2704c1b3655fe7b025204a71d4d9b73390c759d776d165", size = 373717, upload-time = "2026-02-02T12:35:44.928Z" }, + { url = "https://files.pythonhosted.org/packages/6e/09/9fe4c159358176f82d4390407a03f506a8659ed13ca3ac93a843402acecf/jiter-0.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24ab43126d5e05f3d53a36a8e11eb2f23304c6c1117844aaaf9a0aa5e40b5018", size = 362683, upload-time = "2026-02-02T12:35:46.636Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/5e/85f3ab9caca0c1d0897937d378b4a515cae9e119730563572361ea0c48ae/jiter-0.13.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9da38b4fedde4fb528c740c2564628fbab737166a0e73d6d46cb4bb5463ff411", size = 392345, upload-time = "2026-02-02T12:35:48.088Z" }, + { url = "https://files.pythonhosted.org/packages/12/4c/05b8629ad546191939e6f0c2f17e29f542a398f4a52fb987bc70b6d1eb8b/jiter-0.13.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b34c519e17658ed88d5047999a93547f8889f3c1824120c26ad6be5f27b6cf5", size = 517775, upload-time = "2026-02-02T12:35:49.482Z" }, + { url = "https://files.pythonhosted.org/packages/4d/88/367ea2eb6bc582c7052e4baf5ddf57ebe5ab924a88e0e09830dfb585c02d/jiter-0.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2a6394e6af690d462310a86b53c47ad75ac8c21dc79f120714ea449979cb1d3", size = 551325, upload-time = "2026-02-02T12:35:51.104Z" }, + { url = "https://files.pythonhosted.org/packages/f3/12/fa377ffb94a2f28c41afaed093e0d70cfe512035d5ecb0cad0ae4792d35e/jiter-0.13.0-cp311-cp311-win32.whl", hash = "sha256:0f0c065695f616a27c920a56ad0d4fc46415ef8b806bf8fc1cacf25002bd24e1", size = 204709, upload-time = "2026-02-02T12:35:52.467Z" }, + { url = "https://files.pythonhosted.org/packages/cb/16/8e8203ce92f844dfcd3d9d6a5a7322c77077248dbb12da52d23193a839cd/jiter-0.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:0733312953b909688ae3c2d58d043aa040f9f1a6a75693defed7bc2cc4bf2654", size = 204560, upload-time = "2026-02-02T12:35:53.925Z" }, + { url = "https://files.pythonhosted.org/packages/44/26/97cc40663deb17b9e13c3a5cf29251788c271b18ee4d262c8f94798b8336/jiter-0.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:5d9b34ad56761b3bf0fbe8f7e55468704107608512350962d3317ffd7a4382d5", size = 189608, upload-time = "2026-02-02T12:35:55.304Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/30/7687e4f87086829955013ca12a9233523349767f69653ebc27036313def9/jiter-0.13.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0a2bd69fc1d902e89925fc34d1da51b2128019423d7b339a45d9e99c894e0663", size = 307958, upload-time = "2026-02-02T12:35:57.165Z" }, + { url = "https://files.pythonhosted.org/packages/c3/27/e57f9a783246ed95481e6749cc5002a8a767a73177a83c63ea71f0528b90/jiter-0.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f917a04240ef31898182f76a332f508f2cc4b57d2b4d7ad2dbfebbfe167eb505", size = 318597, upload-time = "2026-02-02T12:35:58.591Z" }, + { url = "https://files.pythonhosted.org/packages/cf/52/e5719a60ac5d4d7c5995461a94ad5ef962a37c8bf5b088390e6fad59b2ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1e2b199f446d3e82246b4fd9236d7cb502dc2222b18698ba0d986d2fecc6152", size = 348821, upload-time = "2026-02-02T12:36:00.093Z" }, + { url = "https://files.pythonhosted.org/packages/61/db/c1efc32b8ba4c740ab3fc2d037d8753f67685f475e26b9d6536a4322bcdd/jiter-0.13.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04670992b576fa65bd056dbac0c39fe8bd67681c380cb2b48efa885711d9d726", size = 364163, upload-time = "2026-02-02T12:36:01.937Z" }, + { url = "https://files.pythonhosted.org/packages/55/8a/fb75556236047c8806995671a18e4a0ad646ed255276f51a20f32dceaeec/jiter-0.13.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a1aff1fbdb803a376d4d22a8f63f8e7ccbce0b4890c26cc7af9e501ab339ef0", size = 483709, upload-time = "2026-02-02T12:36:03.41Z" }, + { url = "https://files.pythonhosted.org/packages/7e/16/43512e6ee863875693a8e6f6d532e19d650779d6ba9a81593ae40a9088ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b3fb8c2053acaef8580809ac1d1f7481a0a0bdc012fd7f5d8b18fb696a5a089", size = 370480, upload-time = "2026-02-02T12:36:04.791Z" }, + { url = 
"https://files.pythonhosted.org/packages/f8/4c/09b93e30e984a187bc8aaa3510e1ec8dcbdcd71ca05d2f56aac0492453aa/jiter-0.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdaba7d87e66f26a2c45d8cbadcbfc4bf7884182317907baf39cfe9775bb4d93", size = 360735, upload-time = "2026-02-02T12:36:06.994Z" }, + { url = "https://files.pythonhosted.org/packages/1a/1b/46c5e349019874ec5dfa508c14c37e29864ea108d376ae26d90bee238cd7/jiter-0.13.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b88d649135aca526da172e48083da915ec086b54e8e73a425ba50999468cc08", size = 391814, upload-time = "2026-02-02T12:36:08.368Z" }, + { url = "https://files.pythonhosted.org/packages/15/9e/26184760e85baee7162ad37b7912797d2077718476bf91517641c92b3639/jiter-0.13.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e404ea551d35438013c64b4f357b0474c7abf9f781c06d44fcaf7a14c69ff9e2", size = 513990, upload-time = "2026-02-02T12:36:09.993Z" }, + { url = "https://files.pythonhosted.org/packages/e9/34/2c9355247d6debad57a0a15e76ab1566ab799388042743656e566b3b7de1/jiter-0.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1f4748aad1b4a93c8bdd70f604d0f748cdc0e8744c5547798acfa52f10e79228", size = 548021, upload-time = "2026-02-02T12:36:11.376Z" }, + { url = "https://files.pythonhosted.org/packages/ac/4a/9f2c23255d04a834398b9c2e0e665382116911dc4d06b795710503cdad25/jiter-0.13.0-cp312-cp312-win32.whl", hash = "sha256:0bf670e3b1445fc4d31612199f1744f67f889ee1bbae703c4b54dc097e5dd394", size = 203024, upload-time = "2026-02-02T12:36:12.682Z" }, + { url = "https://files.pythonhosted.org/packages/09/ee/f0ae675a957ae5a8f160be3e87acea6b11dc7b89f6b7ab057e77b2d2b13a/jiter-0.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:15db60e121e11fe186c0b15236bd5d18381b9ddacdcf4e659feb96fc6c969c92", size = 205424, upload-time = "2026-02-02T12:36:13.93Z" }, + { url = 
"https://files.pythonhosted.org/packages/1b/02/ae611edf913d3cbf02c97cdb90374af2082c48d7190d74c1111dde08bcdd/jiter-0.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:41f92313d17989102f3cb5dd533a02787cdb99454d494344b0361355da52fcb9", size = 186818, upload-time = "2026-02-02T12:36:15.308Z" }, + { url = "https://files.pythonhosted.org/packages/91/9c/7ee5a6ff4b9991e1a45263bfc46731634c4a2bde27dfda6c8251df2d958c/jiter-0.13.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1f8a55b848cbabf97d861495cd65f1e5c590246fabca8b48e1747c4dfc8f85bf", size = 306897, upload-time = "2026-02-02T12:36:16.748Z" }, + { url = "https://files.pythonhosted.org/packages/7c/02/be5b870d1d2be5dd6a91bdfb90f248fbb7dcbd21338f092c6b89817c3dbf/jiter-0.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f556aa591c00f2c45eb1b89f68f52441a016034d18b65da60e2d2875bbbf344a", size = 317507, upload-time = "2026-02-02T12:36:18.351Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/b25d2ec333615f5f284f3a4024f7ce68cfa0604c322c6808b2344c7f5d2b/jiter-0.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7e1d61da332ec412350463891923f960c3073cf1aae93b538f0bb4c8cd46efb", size = 350560, upload-time = "2026-02-02T12:36:19.746Z" }, + { url = "https://files.pythonhosted.org/packages/be/ec/74dcb99fef0aca9fbe56b303bf79f6bd839010cb18ad41000bf6cc71eec0/jiter-0.13.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3097d665a27bc96fd9bbf7f86178037db139f319f785e4757ce7ccbf390db6c2", size = 363232, upload-time = "2026-02-02T12:36:21.243Z" }, + { url = "https://files.pythonhosted.org/packages/1b/37/f17375e0bb2f6a812d4dd92d7616e41917f740f3e71343627da9db2824ce/jiter-0.13.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d01ecc3a8cbdb6f25a37bd500510550b64ddf9f7d64a107d92f3ccb25035d0f", size = 483727, upload-time = "2026-02-02T12:36:22.688Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/d2/a71160a5ae1a1e66c1395b37ef77da67513b0adba73b993a27fbe47eb048/jiter-0.13.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed9bbc30f5d60a3bdf63ae76beb3f9db280d7f195dfcfa61af792d6ce912d159", size = 370799, upload-time = "2026-02-02T12:36:24.106Z" }, + { url = "https://files.pythonhosted.org/packages/01/99/ed5e478ff0eb4e8aa5fd998f9d69603c9fd3f32de3bd16c2b1194f68361c/jiter-0.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fbafb6e88256f4454de33c1f40203d09fc33ed19162a68b3b257b29ca7f663", size = 359120, upload-time = "2026-02-02T12:36:25.519Z" }, + { url = "https://files.pythonhosted.org/packages/16/be/7ffd08203277a813f732ba897352797fa9493faf8dc7995b31f3d9cb9488/jiter-0.13.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5467696f6b827f1116556cb0db620440380434591e93ecee7fd14d1a491b6daa", size = 390664, upload-time = "2026-02-02T12:36:26.866Z" }, + { url = "https://files.pythonhosted.org/packages/d1/84/e0787856196d6d346264d6dcccb01f741e5f0bd014c1d9a2ebe149caf4f3/jiter-0.13.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2d08c9475d48b92892583df9da592a0e2ac49bcd41fae1fec4f39ba6cf107820", size = 513543, upload-time = "2026-02-02T12:36:28.217Z" }, + { url = "https://files.pythonhosted.org/packages/65/50/ecbd258181c4313cf79bca6c88fb63207d04d5bf5e4f65174114d072aa55/jiter-0.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:aed40e099404721d7fcaf5b89bd3b4568a4666358bcac7b6b15c09fb6252ab68", size = 547262, upload-time = "2026-02-02T12:36:29.678Z" }, + { url = "https://files.pythonhosted.org/packages/27/da/68f38d12e7111d2016cd198161b36e1f042bd115c169255bcb7ec823a3bf/jiter-0.13.0-cp313-cp313-win32.whl", hash = "sha256:36ebfbcffafb146d0e6ffb3e74d51e03d9c35ce7c625c8066cdbfc7b953bdc72", size = 200630, upload-time = "2026-02-02T12:36:31.808Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/65/3bd1a972c9a08ecd22eb3b08a95d1941ebe6938aea620c246cf426ae09c2/jiter-0.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:8d76029f077379374cf0dbc78dbe45b38dec4a2eb78b08b5194ce836b2517afc", size = 202602, upload-time = "2026-02-02T12:36:33.679Z" }, + { url = "https://files.pythonhosted.org/packages/15/fe/13bd3678a311aa67686bb303654792c48206a112068f8b0b21426eb6851e/jiter-0.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:bb7613e1a427cfcb6ea4544f9ac566b93d5bf67e0d48c787eca673ff9c9dff2b", size = 185939, upload-time = "2026-02-02T12:36:35.065Z" }, + { url = "https://files.pythonhosted.org/packages/49/19/a929ec002ad3228bc97ca01dbb14f7632fffdc84a95ec92ceaf4145688ae/jiter-0.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fa476ab5dd49f3bf3a168e05f89358c75a17608dbabb080ef65f96b27c19ab10", size = 316616, upload-time = "2026-02-02T12:36:36.579Z" }, + { url = "https://files.pythonhosted.org/packages/52/56/d19a9a194afa37c1728831e5fb81b7722c3de18a3109e8f282bfc23e587a/jiter-0.13.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade8cb6ff5632a62b7dbd4757d8c5573f7a2e9ae285d6b5b841707d8363205ef", size = 346850, upload-time = "2026-02-02T12:36:38.058Z" }, + { url = "https://files.pythonhosted.org/packages/36/4a/94e831c6bf287754a8a019cb966ed39ff8be6ab78cadecf08df3bb02d505/jiter-0.13.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9950290340acc1adaded363edd94baebcee7dabdfa8bee4790794cd5cfad2af6", size = 358551, upload-time = "2026-02-02T12:36:39.417Z" }, + { url = "https://files.pythonhosted.org/packages/a2/ec/a4c72c822695fa80e55d2b4142b73f0012035d9fcf90eccc56bc060db37c/jiter-0.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2b4972c6df33731aac0742b64fd0d18e0a69bc7d6e03108ce7d40c85fd9e3e6d", size = 201950, upload-time = "2026-02-02T12:36:40.791Z" }, + { url = 
"https://files.pythonhosted.org/packages/b6/00/393553ec27b824fbc29047e9c7cd4a3951d7fbe4a76743f17e44034fa4e4/jiter-0.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:701a1e77d1e593c1b435315ff625fd071f0998c5f02792038a5ca98899261b7d", size = 185852, upload-time = "2026-02-02T12:36:42.077Z" }, + { url = "https://files.pythonhosted.org/packages/6e/f5/f1997e987211f6f9bd71b8083047b316208b4aca0b529bb5f8c96c89ef3e/jiter-0.13.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:cc5223ab19fe25e2f0bf2643204ad7318896fe3729bf12fde41b77bfc4fafff0", size = 308804, upload-time = "2026-02-02T12:36:43.496Z" }, + { url = "https://files.pythonhosted.org/packages/cd/8f/5482a7677731fd44881f0204981ce2d7175db271f82cba2085dd2212e095/jiter-0.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9776ebe51713acf438fd9b4405fcd86893ae5d03487546dae7f34993217f8a91", size = 318787, upload-time = "2026-02-02T12:36:45.071Z" }, + { url = "https://files.pythonhosted.org/packages/f3/b9/7257ac59778f1cd025b26a23c5520a36a424f7f1b068f2442a5b499b7464/jiter-0.13.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879e768938e7b49b5e90b7e3fecc0dbec01b8cb89595861fb39a8967c5220d09", size = 353880, upload-time = "2026-02-02T12:36:47.365Z" }, + { url = "https://files.pythonhosted.org/packages/c3/87/719eec4a3f0841dad99e3d3604ee4cba36af4419a76f3cb0b8e2e691ad67/jiter-0.13.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:682161a67adea11e3aae9038c06c8b4a9a71023228767477d683f69903ebc607", size = 366702, upload-time = "2026-02-02T12:36:48.871Z" }, + { url = "https://files.pythonhosted.org/packages/d2/65/415f0a75cf6921e43365a1bc227c565cb949caca8b7532776e430cbaa530/jiter-0.13.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a13b68cd1cd8cc9de8f244ebae18ccb3e4067ad205220ef324c39181e23bbf66", size = 486319, upload-time = "2026-02-02T12:36:53.006Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/a2/9e12b48e82c6bbc6081fd81abf915e1443add1b13d8fc586e1d90bb02bb8/jiter-0.13.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87ce0f14c6c08892b610686ae8be350bf368467b6acd5085a5b65441e2bf36d2", size = 372289, upload-time = "2026-02-02T12:36:54.593Z" }, + { url = "https://files.pythonhosted.org/packages/4e/c1/e4693f107a1789a239c759a432e9afc592366f04e901470c2af89cfd28e1/jiter-0.13.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c365005b05505a90d1c47856420980d0237adf82f70c4aff7aebd3c1cc143ad", size = 360165, upload-time = "2026-02-02T12:36:56.112Z" }, + { url = "https://files.pythonhosted.org/packages/17/08/91b9ea976c1c758240614bd88442681a87672eebc3d9a6dde476874e706b/jiter-0.13.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1317fdffd16f5873e46ce27d0e0f7f4f90f0cdf1d86bf6abeaea9f63ca2c401d", size = 389634, upload-time = "2026-02-02T12:36:57.495Z" }, + { url = "https://files.pythonhosted.org/packages/18/23/58325ef99390d6d40427ed6005bf1ad54f2577866594bcf13ce55675f87d/jiter-0.13.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c05b450d37ba0c9e21c77fef1f205f56bcee2330bddca68d344baebfc55ae0df", size = 514933, upload-time = "2026-02-02T12:36:58.909Z" }, + { url = "https://files.pythonhosted.org/packages/5b/25/69f1120c7c395fd276c3996bb8adefa9c6b84c12bb7111e5c6ccdcd8526d/jiter-0.13.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:775e10de3849d0631a97c603f996f518159272db00fdda0a780f81752255ee9d", size = 548842, upload-time = "2026-02-02T12:37:00.433Z" }, + { url = "https://files.pythonhosted.org/packages/18/05/981c9669d86850c5fbb0d9e62bba144787f9fba84546ba43d624ee27ef29/jiter-0.13.0-cp314-cp314-win32.whl", hash = "sha256:632bf7c1d28421c00dd8bbb8a3bac5663e1f57d5cd5ed962bce3c73bf62608e6", size = 202108, upload-time = "2026-02-02T12:37:01.718Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/96/cdcf54dd0b0341db7d25413229888a346c7130bd20820530905fdb65727b/jiter-0.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:f22ef501c3f87ede88f23f9b11e608581c14f04db59b6a801f354397ae13739f", size = 204027, upload-time = "2026-02-02T12:37:03.075Z" }, + { url = "https://files.pythonhosted.org/packages/fb/f9/724bcaaab7a3cd727031fe4f6995cb86c4bd344909177c186699c8dec51a/jiter-0.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:07b75fe09a4ee8e0c606200622e571e44943f47254f95e2436c8bdcaceb36d7d", size = 187199, upload-time = "2026-02-02T12:37:04.414Z" }, + { url = "https://files.pythonhosted.org/packages/62/92/1661d8b9fd6a3d7a2d89831db26fe3c1509a287d83ad7838831c7b7a5c7e/jiter-0.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:964538479359059a35fb400e769295d4b315ae61e4105396d355a12f7fef09f0", size = 318423, upload-time = "2026-02-02T12:37:05.806Z" }, + { url = "https://files.pythonhosted.org/packages/4f/3b/f77d342a54d4ebcd128e520fc58ec2f5b30a423b0fd26acdfc0c6fef8e26/jiter-0.13.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e104da1db1c0991b3eaed391ccd650ae8d947eab1480c733e5a3fb28d4313e40", size = 351438, upload-time = "2026-02-02T12:37:07.189Z" }, + { url = "https://files.pythonhosted.org/packages/76/b3/ba9a69f0e4209bd3331470c723c2f5509e6f0482e416b612431a5061ed71/jiter-0.13.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e3a5f0cde8ff433b8e88e41aa40131455420fb3649a3c7abdda6145f8cb7202", size = 364774, upload-time = "2026-02-02T12:37:08.579Z" }, + { url = "https://files.pythonhosted.org/packages/b3/16/6cdb31fa342932602458dbb631bfbd47f601e03d2e4950740e0b2100b570/jiter-0.13.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57aab48f40be1db920a582b30b116fe2435d184f77f0e4226f546794cedd9cf0", size = 487238, upload-time = "2026-02-02T12:37:10.066Z" }, + { url = 
"https://files.pythonhosted.org/packages/ed/b1/956cc7abaca8d95c13aa8d6c9b3f3797241c246cd6e792934cc4c8b250d2/jiter-0.13.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7772115877c53f62beeb8fd853cab692dbc04374ef623b30f997959a4c0e7e95", size = 372892, upload-time = "2026-02-02T12:37:11.656Z" }, + { url = "https://files.pythonhosted.org/packages/26/c4/97ecde8b1e74f67b8598c57c6fccf6df86ea7861ed29da84629cdbba76c4/jiter-0.13.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1211427574b17b633cfceba5040de8081e5abf114f7a7602f73d2e16f9fdaa59", size = 360309, upload-time = "2026-02-02T12:37:13.244Z" }, + { url = "https://files.pythonhosted.org/packages/4b/d7/eabe3cf46715854ccc80be2cd78dd4c36aedeb30751dbf85a1d08c14373c/jiter-0.13.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7beae3a3d3b5212d3a55d2961db3c292e02e302feb43fce6a3f7a31b90ea6dfe", size = 389607, upload-time = "2026-02-02T12:37:14.881Z" }, + { url = "https://files.pythonhosted.org/packages/df/2d/03963fc0804e6109b82decfb9974eb92df3797fe7222428cae12f8ccaa0c/jiter-0.13.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e5562a0f0e90a6223b704163ea28e831bd3a9faa3512a711f031611e6b06c939", size = 514986, upload-time = "2026-02-02T12:37:16.326Z" }, + { url = "https://files.pythonhosted.org/packages/f6/6c/8c83b45eb3eb1c1e18d841fe30b4b5bc5619d781267ca9bc03e005d8fd0a/jiter-0.13.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:6c26a424569a59140fb51160a56df13f438a2b0967365e987889186d5fc2f6f9", size = 548756, upload-time = "2026-02-02T12:37:17.736Z" }, + { url = "https://files.pythonhosted.org/packages/47/66/eea81dfff765ed66c68fd2ed8c96245109e13c896c2a5015c7839c92367e/jiter-0.13.0-cp314-cp314t-win32.whl", hash = "sha256:24dc96eca9f84da4131cdf87a95e6ce36765c3b156fc9ae33280873b1c32d5f6", size = 201196, upload-time = "2026-02-02T12:37:19.101Z" }, + { url = 
"https://files.pythonhosted.org/packages/ff/32/4ac9c7a76402f8f00d00842a7f6b83b284d0cf7c1e9d4227bc95aa6d17fa/jiter-0.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0a8d76c7524087272c8ae913f5d9d608bd839154b62c4322ef65723d2e5bb0b8", size = 204215, upload-time = "2026-02-02T12:37:20.495Z" }, + { url = "https://files.pythonhosted.org/packages/f9/8e/7def204fea9f9be8b3c21a6f2dd6c020cf56c7d5ff753e0e23ed7f9ea57e/jiter-0.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2c26cf47e2cad140fa23b6d58d435a7c0161f5c514284802f25e87fddfe11024", size = 187152, upload-time = "2026-02-02T12:37:22.124Z" }, + { url = "https://files.pythonhosted.org/packages/79/b3/3c29819a27178d0e461a8571fb63c6ae38be6dc36b78b3ec2876bbd6a910/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b1cbfa133241d0e6bdab48dcdc2604e8ba81512f6bbd68ec3e8e1357dd3c316c", size = 307016, upload-time = "2026-02-02T12:37:42.755Z" }, + { url = "https://files.pythonhosted.org/packages/eb/ae/60993e4b07b1ac5ebe46da7aa99fdbb802eb986c38d26e3883ac0125c4e0/jiter-0.13.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:db367d8be9fad6e8ebbac4a7578b7af562e506211036cba2c06c3b998603c3d2", size = 305024, upload-time = "2026-02-02T12:37:44.774Z" }, + { url = "https://files.pythonhosted.org/packages/77/fa/2227e590e9cf98803db2811f172b2d6460a21539ab73006f251c66f44b14/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45f6f8efb2f3b0603092401dc2df79fa89ccbc027aaba4174d2d4133ed661434", size = 339337, upload-time = "2026-02-02T12:37:46.668Z" }, + { url = "https://files.pythonhosted.org/packages/2d/92/015173281f7eb96c0ef580c997da8ef50870d4f7f4c9e03c845a1d62ae04/jiter-0.13.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:597245258e6ad085d064780abfb23a284d418d3e61c57362d9449c6c7317ee2d", size = 346395, upload-time = "2026-02-02T12:37:48.09Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/60/e50fa45dd7e2eae049f0ce964663849e897300433921198aef94b6ffa23a/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:3d744a6061afba08dd7ae375dcde870cffb14429b7477e10f67e9e6d68772a0a", size = 305169, upload-time = "2026-02-02T12:37:50.376Z" }, + { url = "https://files.pythonhosted.org/packages/d2/73/a009f41c5eed71c49bec53036c4b33555afcdee70682a18c6f66e396c039/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:ff732bd0a0e778f43d5009840f20b935e79087b4dc65bd36f1cd0f9b04b8ff7f", size = 303808, upload-time = "2026-02-02T12:37:52.092Z" }, + { url = "https://files.pythonhosted.org/packages/c4/10/528b439290763bff3d939268085d03382471b442f212dca4ff5f12802d43/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab44b178f7981fcaea7e0a5df20e773c663d06ffda0198f1a524e91b2fde7e59", size = 337384, upload-time = "2026-02-02T12:37:53.582Z" }, + { url = "https://files.pythonhosted.org/packages/67/8a/a342b2f0251f3dac4ca17618265d93bf244a2a4d089126e81e4c1056ac50/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb00b6d26db67a05fe3e12c76edc75f32077fb51deed13822dc648fa373bc19", size = 343768, upload-time = "2026-02-02T12:37:55.055Z" }, +] + +[[package]] +name = "numpy" +version = "2.2.6" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.11'", +] +sdist = { url = "https://files.pythonhosted.org/packages/76/21/7d2a95e4bba9dc13d043ee156a356c0a8f0c6309dff6b21b4d71a073b8a8/numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd", size = 20276440, upload-time = "2025-05-17T22:38:04.611Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/3e/ed6db5be21ce87955c0cbd3009f2803f59fa08df21b5df06862e2d8e2bdd/numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb", size = 21165245, upload-time = "2025-05-17T21:27:58.555Z" }, + { url = "https://files.pythonhosted.org/packages/22/c2/4b9221495b2a132cc9d2eb862e21d42a009f5a60e45fc44b00118c174bff/numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90", size = 14360048, upload-time = "2025-05-17T21:28:21.406Z" }, + { url = "https://files.pythonhosted.org/packages/fd/77/dc2fcfc66943c6410e2bf598062f5959372735ffda175b39906d54f02349/numpy-2.2.6-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163", size = 5340542, upload-time = "2025-05-17T21:28:30.931Z" }, + { url = "https://files.pythonhosted.org/packages/7a/4f/1cb5fdc353a5f5cc7feb692db9b8ec2c3d6405453f982435efc52561df58/numpy-2.2.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf", size = 6878301, upload-time = "2025-05-17T21:28:41.613Z" }, + { url = "https://files.pythonhosted.org/packages/eb/17/96a3acd228cec142fcb8723bd3cc39c2a474f7dcf0a5d16731980bcafa95/numpy-2.2.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83", size = 14297320, upload-time = "2025-05-17T21:29:02.78Z" }, + { url = "https://files.pythonhosted.org/packages/b4/63/3de6a34ad7ad6646ac7d2f55ebc6ad439dbbf9c4370017c50cf403fb19b5/numpy-2.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915", size = 16801050, upload-time = "2025-05-17T21:29:27.675Z" }, + { url = "https://files.pythonhosted.org/packages/07/b6/89d837eddef52b3d0cec5c6ba0456c1bf1b9ef6a6672fc2b7873c3ec4e2e/numpy-2.2.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680", size = 15807034, 
upload-time = "2025-05-17T21:29:51.102Z" }, + { url = "https://files.pythonhosted.org/packages/01/c8/dc6ae86e3c61cfec1f178e5c9f7858584049b6093f843bca541f94120920/numpy-2.2.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289", size = 18614185, upload-time = "2025-05-17T21:30:18.703Z" }, + { url = "https://files.pythonhosted.org/packages/5b/c5/0064b1b7e7c89137b471ccec1fd2282fceaae0ab3a9550f2568782d80357/numpy-2.2.6-cp310-cp310-win32.whl", hash = "sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d", size = 6527149, upload-time = "2025-05-17T21:30:29.788Z" }, + { url = "https://files.pythonhosted.org/packages/a3/dd/4b822569d6b96c39d1215dbae0582fd99954dcbcf0c1a13c61783feaca3f/numpy-2.2.6-cp310-cp310-win_amd64.whl", hash = "sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3", size = 12904620, upload-time = "2025-05-17T21:30:48.994Z" }, + { url = "https://files.pythonhosted.org/packages/da/a8/4f83e2aa666a9fbf56d6118faaaf5f1974d456b1823fda0a176eff722839/numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae", size = 21176963, upload-time = "2025-05-17T21:31:19.36Z" }, + { url = "https://files.pythonhosted.org/packages/b3/2b/64e1affc7972decb74c9e29e5649fac940514910960ba25cd9af4488b66c/numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a", size = 14406743, upload-time = "2025-05-17T21:31:41.087Z" }, + { url = "https://files.pythonhosted.org/packages/4a/9f/0121e375000b5e50ffdd8b25bf78d8e1a5aa4cca3f185d41265198c7b834/numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42", size = 5352616, upload-time = "2025-05-17T21:31:50.072Z" }, + { url = 
"https://files.pythonhosted.org/packages/31/0d/b48c405c91693635fbe2dcd7bc84a33a602add5f63286e024d3b6741411c/numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491", size = 6889579, upload-time = "2025-05-17T21:32:01.712Z" }, + { url = "https://files.pythonhosted.org/packages/52/b8/7f0554d49b565d0171eab6e99001846882000883998e7b7d9f0d98b1f934/numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a", size = 14312005, upload-time = "2025-05-17T21:32:23.332Z" }, + { url = "https://files.pythonhosted.org/packages/b3/dd/2238b898e51bd6d389b7389ffb20d7f4c10066d80351187ec8e303a5a475/numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf", size = 16821570, upload-time = "2025-05-17T21:32:47.991Z" }, + { url = "https://files.pythonhosted.org/packages/83/6c/44d0325722cf644f191042bf47eedad61c1e6df2432ed65cbe28509d404e/numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1", size = 15818548, upload-time = "2025-05-17T21:33:11.728Z" }, + { url = "https://files.pythonhosted.org/packages/ae/9d/81e8216030ce66be25279098789b665d49ff19eef08bfa8cb96d4957f422/numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab", size = 18620521, upload-time = "2025-05-17T21:33:39.139Z" }, + { url = "https://files.pythonhosted.org/packages/6a/fd/e19617b9530b031db51b0926eed5345ce8ddc669bb3bc0044b23e275ebe8/numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47", size = 6525866, upload-time = "2025-05-17T21:33:50.273Z" }, + { url = 
"https://files.pythonhosted.org/packages/31/0a/f354fb7176b81747d870f7991dc763e157a934c717b67b58456bc63da3df/numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303", size = 12907455, upload-time = "2025-05-17T21:34:09.135Z" }, + { url = "https://files.pythonhosted.org/packages/82/5d/c00588b6cf18e1da539b45d3598d3557084990dcc4331960c15ee776ee41/numpy-2.2.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff", size = 20875348, upload-time = "2025-05-17T21:34:39.648Z" }, + { url = "https://files.pythonhosted.org/packages/66/ee/560deadcdde6c2f90200450d5938f63a34b37e27ebff162810f716f6a230/numpy-2.2.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c", size = 14119362, upload-time = "2025-05-17T21:35:01.241Z" }, + { url = "https://files.pythonhosted.org/packages/3c/65/4baa99f1c53b30adf0acd9a5519078871ddde8d2339dc5a7fde80d9d87da/numpy-2.2.6-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3", size = 5084103, upload-time = "2025-05-17T21:35:10.622Z" }, + { url = "https://files.pythonhosted.org/packages/cc/89/e5a34c071a0570cc40c9a54eb472d113eea6d002e9ae12bb3a8407fb912e/numpy-2.2.6-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282", size = 6625382, upload-time = "2025-05-17T21:35:21.414Z" }, + { url = "https://files.pythonhosted.org/packages/f8/35/8c80729f1ff76b3921d5c9487c7ac3de9b2a103b1cd05e905b3090513510/numpy-2.2.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87", size = 14018462, upload-time = "2025-05-17T21:35:42.174Z" }, + { url = 
"https://files.pythonhosted.org/packages/8c/3d/1e1db36cfd41f895d266b103df00ca5b3cbe965184df824dec5c08c6b803/numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249", size = 16527618, upload-time = "2025-05-17T21:36:06.711Z" }, + { url = "https://files.pythonhosted.org/packages/61/c6/03ed30992602c85aa3cd95b9070a514f8b3c33e31124694438d88809ae36/numpy-2.2.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49", size = 15505511, upload-time = "2025-05-17T21:36:29.965Z" }, + { url = "https://files.pythonhosted.org/packages/b7/25/5761d832a81df431e260719ec45de696414266613c9ee268394dd5ad8236/numpy-2.2.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de", size = 18313783, upload-time = "2025-05-17T21:36:56.883Z" }, + { url = "https://files.pythonhosted.org/packages/57/0a/72d5a3527c5ebffcd47bde9162c39fae1f90138c961e5296491ce778e682/numpy-2.2.6-cp312-cp312-win32.whl", hash = "sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4", size = 6246506, upload-time = "2025-05-17T21:37:07.368Z" }, + { url = "https://files.pythonhosted.org/packages/36/fa/8c9210162ca1b88529ab76b41ba02d433fd54fecaf6feb70ef9f124683f1/numpy-2.2.6-cp312-cp312-win_amd64.whl", hash = "sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2", size = 12614190, upload-time = "2025-05-17T21:37:26.213Z" }, + { url = "https://files.pythonhosted.org/packages/f9/5c/6657823f4f594f72b5471f1db1ab12e26e890bb2e41897522d134d2a3e81/numpy-2.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84", size = 20867828, upload-time = "2025-05-17T21:37:56.699Z" }, + { url = 
"https://files.pythonhosted.org/packages/dc/9e/14520dc3dadf3c803473bd07e9b2bd1b69bc583cb2497b47000fed2fa92f/numpy-2.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b", size = 14143006, upload-time = "2025-05-17T21:38:18.291Z" }, + { url = "https://files.pythonhosted.org/packages/4f/06/7e96c57d90bebdce9918412087fc22ca9851cceaf5567a45c1f404480e9e/numpy-2.2.6-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d", size = 5076765, upload-time = "2025-05-17T21:38:27.319Z" }, + { url = "https://files.pythonhosted.org/packages/73/ed/63d920c23b4289fdac96ddbdd6132e9427790977d5457cd132f18e76eae0/numpy-2.2.6-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566", size = 6617736, upload-time = "2025-05-17T21:38:38.141Z" }, + { url = "https://files.pythonhosted.org/packages/85/c5/e19c8f99d83fd377ec8c7e0cf627a8049746da54afc24ef0a0cb73d5dfb5/numpy-2.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f", size = 14010719, upload-time = "2025-05-17T21:38:58.433Z" }, + { url = "https://files.pythonhosted.org/packages/19/49/4df9123aafa7b539317bf6d342cb6d227e49f7a35b99c287a6109b13dd93/numpy-2.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f", size = 16526072, upload-time = "2025-05-17T21:39:22.638Z" }, + { url = "https://files.pythonhosted.org/packages/b2/6c/04b5f47f4f32f7c2b0e7260442a8cbcf8168b0e1a41ff1495da42f42a14f/numpy-2.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868", size = 15503213, upload-time = "2025-05-17T21:39:45.865Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/0a/5cd92e352c1307640d5b6fec1b2ffb06cd0dabe7d7b8227f97933d378422/numpy-2.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d", size = 18316632, upload-time = "2025-05-17T21:40:13.331Z" }, + { url = "https://files.pythonhosted.org/packages/f0/3b/5cba2b1d88760ef86596ad0f3d484b1cbff7c115ae2429678465057c5155/numpy-2.2.6-cp313-cp313-win32.whl", hash = "sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd", size = 6244532, upload-time = "2025-05-17T21:43:46.099Z" }, + { url = "https://files.pythonhosted.org/packages/cb/3b/d58c12eafcb298d4e6d0d40216866ab15f59e55d148a5658bb3132311fcf/numpy-2.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c", size = 12610885, upload-time = "2025-05-17T21:44:05.145Z" }, + { url = "https://files.pythonhosted.org/packages/6b/9e/4bf918b818e516322db999ac25d00c75788ddfd2d2ade4fa66f1f38097e1/numpy-2.2.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6", size = 20963467, upload-time = "2025-05-17T21:40:44Z" }, + { url = "https://files.pythonhosted.org/packages/61/66/d2de6b291507517ff2e438e13ff7b1e2cdbdb7cb40b3ed475377aece69f9/numpy-2.2.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda", size = 14225144, upload-time = "2025-05-17T21:41:05.695Z" }, + { url = "https://files.pythonhosted.org/packages/e4/25/480387655407ead912e28ba3a820bc69af9adf13bcbe40b299d454ec011f/numpy-2.2.6-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40", size = 5200217, upload-time = "2025-05-17T21:41:15.903Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4a/6e313b5108f53dcbf3aca0c0f3e9c92f4c10ce57a0a721851f9785872895/numpy-2.2.6-cp313-cp313t-macosx_14_0_x86_64.whl", hash 
= "sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8", size = 6712014, upload-time = "2025-05-17T21:41:27.321Z" }, + { url = "https://files.pythonhosted.org/packages/b7/30/172c2d5c4be71fdf476e9de553443cf8e25feddbe185e0bd88b096915bcc/numpy-2.2.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f", size = 14077935, upload-time = "2025-05-17T21:41:49.738Z" }, + { url = "https://files.pythonhosted.org/packages/12/fb/9e743f8d4e4d3c710902cf87af3512082ae3d43b945d5d16563f26ec251d/numpy-2.2.6-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa", size = 16600122, upload-time = "2025-05-17T21:42:14.046Z" }, + { url = "https://files.pythonhosted.org/packages/12/75/ee20da0e58d3a66f204f38916757e01e33a9737d0b22373b3eb5a27358f9/numpy-2.2.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571", size = 15586143, upload-time = "2025-05-17T21:42:37.464Z" }, + { url = "https://files.pythonhosted.org/packages/76/95/bef5b37f29fc5e739947e9ce5179ad402875633308504a52d188302319c8/numpy-2.2.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1", size = 18385260, upload-time = "2025-05-17T21:43:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/09/04/f2f83279d287407cf36a7a8053a5abe7be3622a4363337338f2585e4afda/numpy-2.2.6-cp313-cp313t-win32.whl", hash = "sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff", size = 6377225, upload-time = "2025-05-17T21:43:16.254Z" }, + { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, 
upload-time = "2025-05-17T21:43:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/9e/3b/d94a75f4dbf1ef5d321523ecac21ef23a3cd2ac8b78ae2aac40873590229/numpy-2.2.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d", size = 21040391, upload-time = "2025-05-17T21:44:35.948Z" }, + { url = "https://files.pythonhosted.org/packages/17/f4/09b2fa1b58f0fb4f7c7963a1649c64c4d315752240377ed74d9cd878f7b5/numpy-2.2.6-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db", size = 6786754, upload-time = "2025-05-17T21:44:47.446Z" }, + { url = "https://files.pythonhosted.org/packages/af/30/feba75f143bdc868a1cc3f44ccfa6c4b9ec522b36458e738cd00f67b573f/numpy-2.2.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543", size = 16643476, upload-time = "2025-05-17T21:45:11.871Z" }, + { url = "https://files.pythonhosted.org/packages/37/48/ac2a9584402fb6c0cd5b5d1a91dcf176b15760130dd386bbafdbfe3640bf/numpy-2.2.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00", size = 12812666, upload-time = "2025-05-17T21:45:31.426Z" }, +] + +[[package]] +name = "numpy" +version = "2.4.4" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.11'", +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/9f/b8cef5bffa569759033adda9481211426f12f53299629b410340795c2514/numpy-2.4.4.tar.gz", hash = "sha256:2d390634c5182175533585cc89f3608a4682ccb173cc9bb940b2881c8d6f8fa0", size = 20731587, upload-time = "2026-03-29T13:22:01.298Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/c6/4218570d8c8ecc9704b5157a3348e486e84ef4be0ed3e38218ab473c83d2/numpy-2.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:f983334aea213c99992053ede6168500e5f086ce74fbc4acc3f2b00f5762e9db", size = 16976799, upload-time = "2026-03-29T13:18:15.438Z" }, + { url = "https://files.pythonhosted.org/packages/dd/92/b4d922c4a5f5dab9ed44e6153908a5c665b71acf183a83b93b690996e39b/numpy-2.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:72944b19f2324114e9dc86a159787333b77874143efcf89a5167ef83cfee8af0", size = 14971552, upload-time = "2026-03-29T13:18:18.606Z" }, + { url = "https://files.pythonhosted.org/packages/8a/dc/df98c095978fa6ee7b9a9387d1d58cbb3d232d0e69ad169a4ce784bde4fd/numpy-2.4.4-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:86b6f55f5a352b48d7fbfd2dbc3d5b780b2d79f4d3c121f33eb6efb22e9a2015", size = 5476566, upload-time = "2026-03-29T13:18:21.532Z" }, + { url = "https://files.pythonhosted.org/packages/28/34/b3fdcec6e725409223dd27356bdf5a3c2cc2282e428218ecc9cb7acc9763/numpy-2.4.4-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:ba1f4fc670ed79f876f70082eff4f9583c15fb9a4b89d6188412de4d18ae2f40", size = 6806482, upload-time = "2026-03-29T13:18:23.634Z" }, + { url = "https://files.pythonhosted.org/packages/68/62/63417c13aa35d57bee1337c67446761dc25ea6543130cf868eace6e8157b/numpy-2.4.4-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a87ec22c87be071b6bdbd27920b129b94f2fc964358ce38f3822635a3e2e03d", size = 15973376, upload-time = "2026-03-29T13:18:26.677Z" }, + { url = "https://files.pythonhosted.org/packages/cf/c5/9fcb7e0e69cef59cf10c746b84f7d58b08bc66a6b7d459783c5a4f6101a6/numpy-2.4.4-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:df3775294accfdd75f32c74ae39fcba920c9a378a2fc18a12b6820aa8c1fb502", size = 16925137, upload-time = "2026-03-29T13:18:30.14Z" }, + { url = "https://files.pythonhosted.org/packages/7e/43/80020edacb3f84b9efdd1591120a4296462c23fd8db0dde1666f6ef66f13/numpy-2.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0d4e437e295f18ec29bc79daf55e8a47a9113df44d66f702f02a293d93a2d6dd", size = 
17329414, upload-time = "2026-03-29T13:18:33.733Z" }, + { url = "https://files.pythonhosted.org/packages/fd/06/af0658593b18a5f73532d377188b964f239eb0894e664a6c12f484472f97/numpy-2.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6aa3236c78803afbcb255045fbef97a9e25a1f6c9888357d205ddc42f4d6eba5", size = 18658397, upload-time = "2026-03-29T13:18:37.511Z" }, + { url = "https://files.pythonhosted.org/packages/e6/ce/13a09ed65f5d0ce5c7dd0669250374c6e379910f97af2c08c57b0608eee4/numpy-2.4.4-cp311-cp311-win32.whl", hash = "sha256:30caa73029a225b2d40d9fae193e008e24b2026b7ee1a867b7ee8d96ca1a448e", size = 6239499, upload-time = "2026-03-29T13:18:40.372Z" }, + { url = "https://files.pythonhosted.org/packages/bd/63/05d193dbb4b5eec1eca73822d80da98b511f8328ad4ae3ca4caf0f4db91d/numpy-2.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:6bbe4eb67390b0a0265a2c25458f6b90a409d5d069f1041e6aff1e27e3d9a79e", size = 12614257, upload-time = "2026-03-29T13:18:42.95Z" }, + { url = "https://files.pythonhosted.org/packages/87/c5/8168052f080c26fa984c413305012be54741c9d0d74abd7fbeeccae3889f/numpy-2.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:fcfe2045fd2e8f3cb0ce9d4ba6dba6333b8fa05bb8a4939c908cd43322d14c7e", size = 10486775, upload-time = "2026-03-29T13:18:45.835Z" }, + { url = "https://files.pythonhosted.org/packages/28/05/32396bec30fb2263770ee910142f49c1476d08e8ad41abf8403806b520ce/numpy-2.4.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:15716cfef24d3a9762e3acdf87e27f58dc823d1348f765bbea6bef8c639bfa1b", size = 16689272, upload-time = "2026-03-29T13:18:49.223Z" }, + { url = "https://files.pythonhosted.org/packages/c5/f3/a983d28637bfcd763a9c7aafdb6d5c0ebf3d487d1e1459ffdb57e2f01117/numpy-2.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23cbfd4c17357c81021f21540da84ee282b9c8fba38a03b7b9d09ba6b951421e", size = 14699573, upload-time = "2026-03-29T13:18:52.629Z" }, + { url = 
"https://files.pythonhosted.org/packages/9b/fd/e5ecca1e78c05106d98028114f5c00d3eddb41207686b2b7de3e477b0e22/numpy-2.4.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8b3b60bb7cba2c8c81837661c488637eee696f59a877788a396d33150c35d842", size = 5204782, upload-time = "2026-03-29T13:18:55.579Z" }, + { url = "https://files.pythonhosted.org/packages/de/2f/702a4594413c1a8632092beae8aba00f1d67947389369b3777aed783fdca/numpy-2.4.4-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:e4a010c27ff6f210ff4c6ef34394cd61470d01014439b192ec22552ee867f2a8", size = 6552038, upload-time = "2026-03-29T13:18:57.769Z" }, + { url = "https://files.pythonhosted.org/packages/7f/37/eed308a8f56cba4d1fdf467a4fc67ef4ff4bf1c888f5fc980481890104b1/numpy-2.4.4-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f9e75681b59ddaa5e659898085ae0eaea229d054f2ac0c7e563a62205a700121", size = 15670666, upload-time = "2026-03-29T13:19:00.341Z" }, + { url = "https://files.pythonhosted.org/packages/0a/0d/0e3ecece05b7a7e87ab9fb587855548da437a061326fff64a223b6dcb78a/numpy-2.4.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:81f4a14bee47aec54f883e0cad2d73986640c1590eb9bfaaba7ad17394481e6e", size = 16645480, upload-time = "2026-03-29T13:19:03.63Z" }, + { url = "https://files.pythonhosted.org/packages/34/49/f2312c154b82a286758ee2f1743336d50651f8b5195db18cdb63675ff649/numpy-2.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:62d6b0f03b694173f9fcb1fb317f7222fd0b0b103e784c6549f5e53a27718c44", size = 17020036, upload-time = "2026-03-29T13:19:07.428Z" }, + { url = "https://files.pythonhosted.org/packages/7b/e9/736d17bd77f1b0ec4f9901aaec129c00d59f5d84d5e79bba540ef12c2330/numpy-2.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fbc356aae7adf9e6336d336b9c8111d390a05df88f1805573ebb0807bd06fd1d", size = 18368643, upload-time = "2026-03-29T13:19:10.775Z" }, + { url = 
"https://files.pythonhosted.org/packages/63/f6/d417977c5f519b17c8a5c3bc9e8304b0908b0e21136fe43bf628a1343914/numpy-2.4.4-cp312-cp312-win32.whl", hash = "sha256:0d35aea54ad1d420c812bfa0385c71cd7cc5bcf7c65fed95fc2cd02fe8c79827", size = 5961117, upload-time = "2026-03-29T13:19:13.464Z" }, + { url = "https://files.pythonhosted.org/packages/2d/5b/e1deebf88ff431b01b7406ca3583ab2bbb90972bbe1c568732e49c844f7e/numpy-2.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:b5f0362dc928a6ecd9db58868fca5e48485205e3855957bdedea308f8672ea4a", size = 12320584, upload-time = "2026-03-29T13:19:16.155Z" }, + { url = "https://files.pythonhosted.org/packages/58/89/e4e856ac82a68c3ed64486a544977d0e7bdd18b8da75b78a577ca31c4395/numpy-2.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:846300f379b5b12cc769334464656bc882e0735d27d9726568bc932fdc49d5ec", size = 10221450, upload-time = "2026-03-29T13:19:18.994Z" }, + { url = "https://files.pythonhosted.org/packages/14/1d/d0a583ce4fefcc3308806a749a536c201ed6b5ad6e1322e227ee4848979d/numpy-2.4.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:08f2e31ed5e6f04b118e49821397f12767934cfdd12a1ce86a058f91e004ee50", size = 16684933, upload-time = "2026-03-29T13:19:22.47Z" }, + { url = "https://files.pythonhosted.org/packages/c1/62/2b7a48fbb745d344742c0277f01286dead15f3f68e4f359fbfcf7b48f70f/numpy-2.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e823b8b6edc81e747526f70f71a9c0a07ac4e7ad13020aa736bb7c9d67196115", size = 14694532, upload-time = "2026-03-29T13:19:25.581Z" }, + { url = "https://files.pythonhosted.org/packages/e5/87/499737bfba066b4a3bebff24a8f1c5b2dee410b209bc6668c9be692580f0/numpy-2.4.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4a19d9dba1a76618dd86b164d608566f393f8ec6ac7c44f0cc879011c45e65af", size = 5199661, upload-time = "2026-03-29T13:19:28.31Z" }, + { url = "https://files.pythonhosted.org/packages/cd/da/464d551604320d1491bc345efed99b4b7034143a85787aab78d5691d5a0e/numpy-2.4.4-cp313-cp313-macosx_14_0_x86_64.whl", hash = 
"sha256:d2a8490669bfe99a233298348acc2d824d496dee0e66e31b66a6022c2ad74a5c", size = 6547539, upload-time = "2026-03-29T13:19:30.97Z" }, + { url = "https://files.pythonhosted.org/packages/7d/90/8d23e3b0dafd024bf31bdec225b3bb5c2dbfa6912f8a53b8659f21216cbf/numpy-2.4.4-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:45dbed2ab436a9e826e302fcdcbe9133f9b0006e5af7168afb8963a6520da103", size = 15668806, upload-time = "2026-03-29T13:19:33.887Z" }, + { url = "https://files.pythonhosted.org/packages/d1/73/a9d864e42a01896bb5974475438f16086be9ba1f0d19d0bb7a07427c4a8b/numpy-2.4.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c901b15172510173f5cb310eae652908340f8dede90fff9e3bf6c0d8dfd92f83", size = 16632682, upload-time = "2026-03-29T13:19:37.336Z" }, + { url = "https://files.pythonhosted.org/packages/34/fb/14570d65c3bde4e202a031210475ae9cde9b7686a2e7dc97ee67d2833b35/numpy-2.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:99d838547ace2c4aace6c4f76e879ddfe02bb58a80c1549928477862b7a6d6ed", size = 17019810, upload-time = "2026-03-29T13:19:40.963Z" }, + { url = "https://files.pythonhosted.org/packages/8a/77/2ba9d87081fd41f6d640c83f26fb7351e536b7ce6dd9061b6af5904e8e46/numpy-2.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0aec54fd785890ecca25a6003fd9a5aed47ad607bbac5cd64f836ad8666f4959", size = 18357394, upload-time = "2026-03-29T13:19:44.859Z" }, + { url = "https://files.pythonhosted.org/packages/a2/23/52666c9a41708b0853fa3b1a12c90da38c507a3074883823126d4e9d5b30/numpy-2.4.4-cp313-cp313-win32.whl", hash = "sha256:07077278157d02f65c43b1b26a3886bce886f95d20aabd11f87932750dfb14ed", size = 5959556, upload-time = "2026-03-29T13:19:47.661Z" }, + { url = "https://files.pythonhosted.org/packages/57/fb/48649b4971cde70d817cf97a2a2fdc0b4d8308569f1dd2f2611959d2e0cf/numpy-2.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:5c70f1cc1c4efbe316a572e2d8b9b9cc44e89b95f79ca3331553fbb63716e2bf", size = 12317311, upload-time = 
"2026-03-29T13:19:50.67Z" }, + { url = "https://files.pythonhosted.org/packages/ba/d8/11490cddd564eb4de97b4579ef6bfe6a736cc07e94c1598590ae25415e01/numpy-2.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:ef4059d6e5152fa1a39f888e344c73fdc926e1b2dd58c771d67b0acfbf2aa67d", size = 10222060, upload-time = "2026-03-29T13:19:54.229Z" }, + { url = "https://files.pythonhosted.org/packages/99/5d/dab4339177a905aad3e2221c915b35202f1ec30d750dd2e5e9d9a72b804b/numpy-2.4.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4bbc7f303d125971f60ec0aaad5e12c62d0d2c925f0ab1273debd0e4ba37aba5", size = 14822302, upload-time = "2026-03-29T13:19:57.585Z" }, + { url = "https://files.pythonhosted.org/packages/eb/e4/0564a65e7d3d97562ed6f9b0fd0fb0a6f559ee444092f105938b50043876/numpy-2.4.4-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:4d6d57903571f86180eb98f8f0c839fa9ebbfb031356d87f1361be91e433f5b7", size = 5327407, upload-time = "2026-03-29T13:20:00.601Z" }, + { url = "https://files.pythonhosted.org/packages/29/8d/35a3a6ce5ad371afa58b4700f1c820f8f279948cca32524e0a695b0ded83/numpy-2.4.4-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:4636de7fd195197b7535f231b5de9e4b36d2c440b6e566d2e4e4746e6af0ca93", size = 6647631, upload-time = "2026-03-29T13:20:02.855Z" }, + { url = "https://files.pythonhosted.org/packages/f4/da/477731acbd5a58a946c736edfdabb2ac5b34c3d08d1ba1a7b437fa0884df/numpy-2.4.4-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ad2e2ef14e0b04e544ea2fa0a36463f847f113d314aa02e5b402fdf910ef309e", size = 15727691, upload-time = "2026-03-29T13:20:06.004Z" }, + { url = "https://files.pythonhosted.org/packages/e6/db/338535d9b152beabeb511579598418ba0212ce77cf9718edd70262cc4370/numpy-2.4.4-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a285b3b96f951841799528cd1f4f01cd70e7e0204b4abebac9463eecfcf2a40", size = 16681241, upload-time = "2026-03-29T13:20:09.417Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/a9/ad248e8f58beb7a0219b413c9c7d8151c5d285f7f946c3e26695bdbbe2df/numpy-2.4.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f8474c4241bc18b750be2abea9d7a9ec84f46ef861dbacf86a4f6e043401f79e", size = 17085767, upload-time = "2026-03-29T13:20:13.126Z" }, + { url = "https://files.pythonhosted.org/packages/b5/1a/3b88ccd3694681356f70da841630e4725a7264d6a885c8d442a697e1146b/numpy-2.4.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4e874c976154687c1f71715b034739b45c7711bec81db01914770373d125e392", size = 18403169, upload-time = "2026-03-29T13:20:17.096Z" }, + { url = "https://files.pythonhosted.org/packages/c2/c9/fcfd5d0639222c6eac7f304829b04892ef51c96a75d479214d77e3ce6e33/numpy-2.4.4-cp313-cp313t-win32.whl", hash = "sha256:9c585a1790d5436a5374bac930dad6ed244c046ed91b2b2a3634eb2971d21008", size = 6083477, upload-time = "2026-03-29T13:20:20.195Z" }, + { url = "https://files.pythonhosted.org/packages/d5/e3/3938a61d1c538aaec8ed6fd6323f57b0c2d2d2219512434c5c878db76553/numpy-2.4.4-cp313-cp313t-win_amd64.whl", hash = "sha256:93e15038125dc1e5345d9b5b68aa7f996ec33b98118d18c6ca0d0b7d6198b7e8", size = 12457487, upload-time = "2026-03-29T13:20:22.946Z" }, + { url = "https://files.pythonhosted.org/packages/97/6a/7e345032cc60501721ef94e0e30b60f6b0bd601f9174ebd36389a2b86d40/numpy-2.4.4-cp313-cp313t-win_arm64.whl", hash = "sha256:0dfd3f9d3adbe2920b68b5cd3d51444e13a10792ec7154cd0a2f6e74d4ab3233", size = 10292002, upload-time = "2026-03-29T13:20:25.909Z" }, + { url = "https://files.pythonhosted.org/packages/6e/06/c54062f85f673dd5c04cbe2f14c3acb8c8b95e3384869bb8cc9bff8cb9df/numpy-2.4.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:f169b9a863d34f5d11b8698ead99febeaa17a13ca044961aa8e2662a6c7766a0", size = 16684353, upload-time = "2026-03-29T13:20:29.504Z" }, + { url = "https://files.pythonhosted.org/packages/4c/39/8a320264a84404c74cc7e79715de85d6130fa07a0898f67fb5cd5bd79908/numpy-2.4.4-cp314-cp314-macosx_11_0_arm64.whl", hash 
= "sha256:2483e4584a1cb3092da4470b38866634bafb223cbcd551ee047633fd2584599a", size = 14704914, upload-time = "2026-03-29T13:20:33.547Z" }, + { url = "https://files.pythonhosted.org/packages/91/fb/287076b2614e1d1044235f50f03748f31fa287e3dbe6abeb35cdfa351eca/numpy-2.4.4-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:2d19e6e2095506d1736b7d80595e0f252d76b89f5e715c35e06e937679ea7d7a", size = 5210005, upload-time = "2026-03-29T13:20:36.45Z" }, + { url = "https://files.pythonhosted.org/packages/63/eb/fcc338595309910de6ecabfcef2419a9ce24399680bfb149421fa2df1280/numpy-2.4.4-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:6a246d5914aa1c820c9443ddcee9c02bec3e203b0c080349533fae17727dfd1b", size = 6544974, upload-time = "2026-03-29T13:20:39.014Z" }, + { url = "https://files.pythonhosted.org/packages/44/5d/e7e9044032a716cdfaa3fba27a8e874bf1c5f1912a1ddd4ed071bf8a14a6/numpy-2.4.4-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:989824e9faf85f96ec9c7761cd8d29c531ad857bfa1daa930cba85baaecf1a9a", size = 15684591, upload-time = "2026-03-29T13:20:42.146Z" }, + { url = "https://files.pythonhosted.org/packages/98/7c/21252050676612625449b4807d6b695b9ce8a7c9e1c197ee6216c8a65c7c/numpy-2.4.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:27a8d92cd10f1382a67d7cf4db7ce18341b66438bdd9f691d7b0e48d104c2a9d", size = 16637700, upload-time = "2026-03-29T13:20:46.204Z" }, + { url = "https://files.pythonhosted.org/packages/b1/29/56d2bbef9465db24ef25393383d761a1af4f446a1df9b8cded4fe3a5a5d7/numpy-2.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:e44319a2953c738205bf3354537979eaa3998ed673395b964c1176083dd46252", size = 17035781, upload-time = "2026-03-29T13:20:50.242Z" }, + { url = "https://files.pythonhosted.org/packages/e3/2b/a35a6d7589d21f44cea7d0a98de5ddcbb3d421b2622a5c96b1edf18707c3/numpy-2.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e892aff75639bbef0d2a2cfd55535510df26ff92f63c92cd84ef8d4ba5a5557f", size = 
18362959, upload-time = "2026-03-29T13:20:54.019Z" }, + { url = "https://files.pythonhosted.org/packages/64/c9/d52ec581f2390e0f5f85cbfd80fb83d965fc15e9f0e1aec2195faa142cde/numpy-2.4.4-cp314-cp314-win32.whl", hash = "sha256:1378871da56ca8943c2ba674530924bb8ca40cd228358a3b5f302ad60cf875fc", size = 6008768, upload-time = "2026-03-29T13:20:56.912Z" }, + { url = "https://files.pythonhosted.org/packages/fa/22/4cc31a62a6c7b74a8730e31a4274c5dc80e005751e277a2ce38e675e4923/numpy-2.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:715d1c092715954784bc79e1174fc2a90093dc4dc84ea15eb14dad8abdcdeb74", size = 12449181, upload-time = "2026-03-29T13:20:59.548Z" }, + { url = "https://files.pythonhosted.org/packages/70/2e/14cda6f4d8e396c612d1bf97f22958e92148801d7e4f110cabebdc0eef4b/numpy-2.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:2c194dd721e54ecad9ad387c1d35e63dce5c4450c6dc7dd5611283dda239aabb", size = 10496035, upload-time = "2026-03-29T13:21:02.524Z" }, + { url = "https://files.pythonhosted.org/packages/b1/e8/8fed8c8d848d7ecea092dc3469643f9d10bc3a134a815a3b033da1d2039b/numpy-2.4.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2aa0613a5177c264ff5921051a5719d20095ea586ca88cc802c5c218d1c67d3e", size = 14824958, upload-time = "2026-03-29T13:21:05.671Z" }, + { url = "https://files.pythonhosted.org/packages/05/1a/d8007a5138c179c2bf33ef44503e83d70434d2642877ee8fbb230e7c0548/numpy-2.4.4-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:42c16925aa5a02362f986765f9ebabf20de75cdefdca827d14315c568dcab113", size = 5330020, upload-time = "2026-03-29T13:21:08.635Z" }, + { url = "https://files.pythonhosted.org/packages/99/64/ffb99ac6ae93faf117bcbd5c7ba48a7f45364a33e8e458545d3633615dda/numpy-2.4.4-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:874f200b2a981c647340f841730fc3a2b54c9d940566a3c4149099591e2c4c3d", size = 6650758, upload-time = "2026-03-29T13:21:10.949Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/6e/795cc078b78a384052e73b2f6281ff7a700e9bf53bcce2ee579d4f6dd879/numpy-2.4.4-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9b39d38a9bd2ae1becd7eac1303d031c5c110ad31f2b319c6e7d98b135c934d", size = 15729948, upload-time = "2026-03-29T13:21:14.047Z" }, + { url = "https://files.pythonhosted.org/packages/5f/86/2acbda8cc2af5f3d7bfc791192863b9e3e19674da7b5e533fded124d1299/numpy-2.4.4-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b268594bccac7d7cf5844c7732e3f20c50921d94e36d7ec9b79e9857694b1b2f", size = 16679325, upload-time = "2026-03-29T13:21:17.561Z" }, + { url = "https://files.pythonhosted.org/packages/bc/59/cafd83018f4aa55e0ac6fa92aa066c0a1877b77a615ceff1711c260ffae8/numpy-2.4.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ac6b31e35612a26483e20750126d30d0941f949426974cace8e6b5c58a3657b0", size = 17084883, upload-time = "2026-03-29T13:21:21.106Z" }, + { url = "https://files.pythonhosted.org/packages/f0/85/a42548db84e65ece46ab2caea3d3f78b416a47af387fcbb47ec28e660dc2/numpy-2.4.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8e3ed142f2728df44263aaf5fb1f5b0b99f4070c553a0d7f033be65338329150", size = 18403474, upload-time = "2026-03-29T13:21:24.828Z" }, + { url = "https://files.pythonhosted.org/packages/ed/ad/483d9e262f4b831000062e5d8a45e342166ec8aaa1195264982bca267e62/numpy-2.4.4-cp314-cp314t-win32.whl", hash = "sha256:dddbbd259598d7240b18c9d87c56a9d2fb3b02fe266f49a7c101532e78c1d871", size = 6155500, upload-time = "2026-03-29T13:21:28.205Z" }, + { url = "https://files.pythonhosted.org/packages/c7/03/2fc4e14c7bd4ff2964b74ba90ecb8552540b6315f201df70f137faa5c589/numpy-2.4.4-cp314-cp314t-win_amd64.whl", hash = "sha256:a7164afb23be6e37ad90b2f10426149fd75aee07ca55653d2aa41e66c4ef697e", size = 12637755, upload-time = "2026-03-29T13:21:31.107Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/78/548fb8e07b1a341746bfbecb32f2c268470f45fa028aacdbd10d9bc73aab/numpy-2.4.4-cp314-cp314t-win_arm64.whl", hash = "sha256:ba203255017337d39f89bdd58417f03c4426f12beed0440cfd933cb15f8669c7", size = 10566643, upload-time = "2026-03-29T13:21:34.339Z" }, + { url = "https://files.pythonhosted.org/packages/6b/33/8fae8f964a4f63ed528264ddf25d2b683d0b663e3cba26961eb838a7c1bd/numpy-2.4.4-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:58c8b5929fcb8287cbd6f0a3fae19c6e03a5c48402ae792962ac465224a629a4", size = 16854491, upload-time = "2026-03-29T13:21:38.03Z" }, + { url = "https://files.pythonhosted.org/packages/bc/d0/1aabee441380b981cf8cdda3ae7a46aa827d1b5a8cce84d14598bc94d6d9/numpy-2.4.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:eea7ac5d2dce4189771cedb559c738a71512768210dc4e4753b107a2048b3d0e", size = 14895830, upload-time = "2026-03-29T13:21:41.509Z" }, + { url = "https://files.pythonhosted.org/packages/a5/b8/aafb0d1065416894fccf4df6b49ef22b8db045187949545bced89c034b8e/numpy-2.4.4-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:51fc224f7ca4d92656d5a5eb315f12eb5fe2c97a66249aa7b5f562528a3be38c", size = 5400927, upload-time = "2026-03-29T13:21:44.747Z" }, + { url = "https://files.pythonhosted.org/packages/d6/77/063baa20b08b431038c7f9ff5435540c7b7265c78cf56012a483019ca72d/numpy-2.4.4-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:28a650663f7314afc3e6ec620f44f333c386aad9f6fc472030865dc0ebb26ee3", size = 6715557, upload-time = "2026-03-29T13:21:47.406Z" }, + { url = "https://files.pythonhosted.org/packages/c7/a8/379542d45a14f149444c5c4c4e7714707239ce9cc1de8c2803958889da14/numpy-2.4.4-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:19710a9ca9992d7174e9c52f643d4272dcd1558c5f7af7f6f8190f633bd651a7", size = 15804253, upload-time = "2026-03-29T13:21:50.753Z" }, + { url = 
"https://files.pythonhosted.org/packages/a2/c8/f0a45426d6d21e7ea3310a15cf90c43a14d9232c31a837702dba437f3373/numpy-2.4.4-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9b2aec6af35c113b05695ebb5749a787acd63cafc83086a05771d1e1cd1e555f", size = 16753552, upload-time = "2026-03-29T13:21:54.344Z" }, + { url = "https://files.pythonhosted.org/packages/04/74/f4c001f4714c3ad9ce037e18cf2b9c64871a84951eaa0baf683a9ca9301c/numpy-2.4.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f2cf083b324a467e1ab358c105f6cad5ea950f50524668a80c486ff1db24e119", size = 12509075, upload-time = "2026-03-29T13:21:57.644Z" }, +] + +[[package]] +name = "openai" +version = "2.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/88/15/52580c8fbc16d0675d516e8749806eda679b16de1e4434ea06fb6feaa610/openai-2.30.0.tar.gz", hash = "sha256:92f7661c990bda4b22a941806c83eabe4896c3094465030dd882a71abe80c885", size = 676084, upload-time = "2026-03-25T22:08:59.96Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/9e/5bfa2270f902d5b92ab7d41ce0475b8630572e71e349b2a4996d14bdda93/openai-2.30.0-py3-none-any.whl", hash = "sha256:9a5ae616888eb2748ec5e0c5b955a51592e0b201a11f4262db920f2a78c5231d", size = 1146656, upload-time = "2026-03-25T22:08:58.2Z" }, +] + +[[package]] +name = "openenv" +version = "0.1.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, + { name = "numpy", version = "2.4.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/35/94/c47e8f7303452793a3519c8cbc1b31dfffdedd13aaed821958ab3f152927/openenv-0.1.13.tar.gz", hash = "sha256:726971d2289472c1c20261436bcccdf3edfcf0b201d16aec127815bd83bfcb3d", size = 5112, upload-time = "2020-12-16T11:49:39.777Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/7f/e6f4467528161b8f0eb2ec784f4bbcd1fa9ea7acad13c0fb18597013e83b/openenv-0.1.13-py3-none-any.whl", hash = "sha256:813249d7f526f40c6e8b325f705294761a5bc887b9144c3383fa2bae7baa7726", size = 12080, upload-time = "2020-12-16T11:49:38.816Z" }, +] + +[[package]] +name = "osint-rl-env" +version = "0.1.0" +source = { editable = "." } +dependencies = [ + { name = "fastapi" }, + { name = "openai" }, + { name = "openenv" }, + { name = "requests" }, + { name = "uvicorn" }, +] + +[package.optional-dependencies] +dev = [ + { name = "pytest" }, +] + +[package.metadata] +requires-dist = [ + { name = "fastapi", specifier = ">=0.115.0" }, + { name = "openai", specifier = ">=1.40.0" }, + { name = "openenv", specifier = ">=0.1.13" }, + { name = "pytest", marker = "extra == 'dev'", specifier = ">=8.0.0" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "uvicorn", specifier = ">=0.30.0" }, +] +provides-extras = ["dev"] + +[[package]] +name = "packaging" +version = "26.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, +] + +[[package]] +name = "pluggy" +version 
= "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = "https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { 
url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = "https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = "https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = "https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = 
"https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = 
"https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = 
"2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = 
"https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = 
"2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = 
"https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = 
"https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = 
"2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = "https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = 
"https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + +[[package]] +name 
= "pygments" +version = "2.20.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/b2/bc9c9196916376152d655522fdcebac55e66de6603a76a02bca1b6414f6c/pygments-2.20.0.tar.gz", hash = "sha256:6757cd03768053ff99f3039c1a36d6c0aa0b263438fcab17520b30a303a82b5f", size = 4955991, upload-time = "2026-03-29T13:29:33.898Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/7e/a72dd26f3b0f4f2bf1dd8923c85f7ceb43172af56d63c7383eb62b332364/pygments-2.20.0-py3-none-any.whl", hash = "sha256:81a9e26dd42fd28a23a2d169d86d7ac03b46e2f8b59ed4698fb4785f946d0176", size = 1231151, upload-time = "2026-03-29T13:29:30.038Z" }, +] + +[[package]] +name = "pytest" +version = "9.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" }, +] + +[[package]] +name = "requests" +version = "2.33.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/5f/a4/98b9c7c6428a668bf7e42ebb7c79d576a1c3c1e3ae2d47e674b468388871/requests-2.33.1.tar.gz", hash = "sha256:18817f8c57c6263968bc123d237e3b8b08ac046f5456bd1e307ee8f4250d3517", size = 134120, upload-time = "2026-03-30T16:09:15.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/8e/7540e8a2036f79a125c1d2ebadf69ed7901608859186c856fa0388ef4197/requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a", size = 64947, upload-time = "2026-03-30T16:09:13.83Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "starlette" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/69/17425771797c36cded50b7fe44e850315d039f28b15901ab44839e70b593/starlette-1.0.0.tar.gz", hash = "sha256:6a4beaf1f81bb472fd19ea9b918b50dc3a77a6f2e190a12954b25e6ed5eea149", size = 2655289, upload-time = "2026-03-22T18:29:46.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/c9/584bc9651441b4ba60cc4d557d8a547b5aff901af35bda3a4ee30c819b82/starlette-1.0.0-py3-none-any.whl", hash = 
"sha256:d3ec55e0bb321692d275455ddfd3df75fff145d009685eb40dc91fc66b03d38b", size = 72651, upload-time = "2026-03-22T18:29:45.111Z" }, +] + +[[package]] +name = "tomli" +version = "2.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/22/de/48c59722572767841493b26183a0d1cc411d54fd759c5607c4590b6563a6/tomli-2.4.1.tar.gz", hash = "sha256:7c7e1a961a0b2f2472c1ac5b69affa0ae1132c39adcb67aba98568702b9cc23f", size = 17543, upload-time = "2026-03-25T20:22:03.828Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/11/db3d5885d8528263d8adc260bb2d28ebf1270b96e98f0e0268d32b8d9900/tomli-2.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f8f0fc26ec2cc2b965b7a3b87cd19c5c6b8c5e5f436b984e85f486d652285c30", size = 154704, upload-time = "2026-03-25T20:21:10.473Z" }, + { url = "https://files.pythonhosted.org/packages/6d/f7/675db52c7e46064a9aa928885a9b20f4124ecb9bc2e1ce74c9106648d202/tomli-2.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ab97e64ccda8756376892c53a72bd1f964e519c77236368527f758fbc36a53a", size = 149454, upload-time = "2026-03-25T20:21:12.036Z" }, + { url = "https://files.pythonhosted.org/packages/61/71/81c50943cf953efa35bce7646caab3cf457a7d8c030b27cfb40d7235f9ee/tomli-2.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:96481a5786729fd470164b47cdb3e0e58062a496f455ee41b4403be77cb5a076", size = 237561, upload-time = "2026-03-25T20:21:13.098Z" }, + { url = "https://files.pythonhosted.org/packages/48/c1/f41d9cb618acccca7df82aaf682f9b49013c9397212cb9f53219e3abac37/tomli-2.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a881ab208c0baf688221f8cecc5401bd291d67e38a1ac884d6736cbcd8247e9", size = 243824, upload-time = "2026-03-25T20:21:14.569Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/e4/5a816ecdd1f8ca51fb756ef684b90f2780afc52fc67f987e3c61d800a46d/tomli-2.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47149d5bd38761ac8be13a84864bf0b7b70bc051806bc3669ab1cbc56216b23c", size = 242227, upload-time = "2026-03-25T20:21:15.712Z" }, + { url = "https://files.pythonhosted.org/packages/6b/49/2b2a0ef529aa6eec245d25f0c703e020a73955ad7edf73e7f54ddc608aa5/tomli-2.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ec9bfaf3ad2df51ace80688143a6a4ebc09a248f6ff781a9945e51937008fcbc", size = 247859, upload-time = "2026-03-25T20:21:17.001Z" }, + { url = "https://files.pythonhosted.org/packages/83/bd/6c1a630eaca337e1e78c5903104f831bda934c426f9231429396ce3c3467/tomli-2.4.1-cp311-cp311-win32.whl", hash = "sha256:ff2983983d34813c1aeb0fa89091e76c3a22889ee83ab27c5eeb45100560c049", size = 97204, upload-time = "2026-03-25T20:21:18.079Z" }, + { url = "https://files.pythonhosted.org/packages/42/59/71461df1a885647e10b6bb7802d0b8e66480c61f3f43079e0dcd315b3954/tomli-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:5ee18d9ebdb417e384b58fe414e8d6af9f4e7a0ae761519fb50f721de398dd4e", size = 108084, upload-time = "2026-03-25T20:21:18.978Z" }, + { url = "https://files.pythonhosted.org/packages/b8/83/dceca96142499c069475b790e7913b1044c1a4337e700751f48ed723f883/tomli-2.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:c2541745709bad0264b7d4705ad453b76ccd191e64aa6f0fc66b69a293a45ece", size = 95285, upload-time = "2026-03-25T20:21:20.309Z" }, + { url = "https://files.pythonhosted.org/packages/c1/ba/42f134a3fe2b370f555f44b1d72feebb94debcab01676bf918d0cb70e9aa/tomli-2.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c742f741d58a28940ce01d58f0ab2ea3ced8b12402f162f4d534dfe18ba1cd6a", size = 155924, upload-time = "2026-03-25T20:21:21.626Z" }, + { url = "https://files.pythonhosted.org/packages/dc/c7/62d7a17c26487ade21c5422b646110f2162f1fcc95980ef7f63e73c68f14/tomli-2.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:7f86fd587c4ed9dd76f318225e7d9b29cfc5a9d43de44e5754db8d1128487085", size = 150018, upload-time = "2026-03-25T20:21:23.002Z" }, + { url = "https://files.pythonhosted.org/packages/5c/05/79d13d7c15f13bdef410bdd49a6485b1c37d28968314eabee452c22a7fda/tomli-2.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ff18e6a727ee0ab0388507b89d1bc6a22b138d1e2fa56d1ad494586d61d2eae9", size = 244948, upload-time = "2026-03-25T20:21:24.04Z" }, + { url = "https://files.pythonhosted.org/packages/10/90/d62ce007a1c80d0b2c93e02cab211224756240884751b94ca72df8a875ca/tomli-2.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:136443dbd7e1dee43c68ac2694fde36b2849865fa258d39bf822c10e8068eac5", size = 253341, upload-time = "2026-03-25T20:21:25.177Z" }, + { url = "https://files.pythonhosted.org/packages/1a/7e/caf6496d60152ad4ed09282c1885cca4eea150bfd007da84aea07bcc0a3e/tomli-2.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e262d41726bc187e69af7825504c933b6794dc3fbd5945e41a79bb14c31f585", size = 248159, upload-time = "2026-03-25T20:21:26.364Z" }, + { url = "https://files.pythonhosted.org/packages/99/e7/c6f69c3120de34bbd882c6fba7975f3d7a746e9218e56ab46a1bc4b42552/tomli-2.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5cb41aa38891e073ee49d55fbc7839cfdb2bc0e600add13874d048c94aadddd1", size = 253290, upload-time = "2026-03-25T20:21:27.46Z" }, + { url = "https://files.pythonhosted.org/packages/d6/2f/4a3c322f22c5c66c4b836ec58211641a4067364f5dcdd7b974b4c5da300c/tomli-2.4.1-cp312-cp312-win32.whl", hash = "sha256:da25dc3563bff5965356133435b757a795a17b17d01dbc0f42fb32447ddfd917", size = 98141, upload-time = "2026-03-25T20:21:28.492Z" }, + { url = "https://files.pythonhosted.org/packages/24/22/4daacd05391b92c55759d55eaee21e1dfaea86ce5c571f10083360adf534/tomli-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:52c8ef851d9a240f11a88c003eacb03c31fc1c9c4ec64a99a0f922b93874fda9", 
size = 108847, upload-time = "2026-03-25T20:21:29.386Z" }, + { url = "https://files.pythonhosted.org/packages/68/fd/70e768887666ddd9e9f5d85129e84910f2db2796f9096aa02b721a53098d/tomli-2.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:f758f1b9299d059cc3f6546ae2af89670cb1c4d48ea29c3cacc4fe7de3058257", size = 95088, upload-time = "2026-03-25T20:21:30.677Z" }, + { url = "https://files.pythonhosted.org/packages/07/06/b823a7e818c756d9a7123ba2cda7d07bc2dd32835648d1a7b7b7a05d848d/tomli-2.4.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36d2bd2ad5fb9eaddba5226aa02c8ec3fa4f192631e347b3ed28186d43be6b54", size = 155866, upload-time = "2026-03-25T20:21:31.65Z" }, + { url = "https://files.pythonhosted.org/packages/14/6f/12645cf7f08e1a20c7eb8c297c6f11d31c1b50f316a7e7e1e1de6e2e7b7e/tomli-2.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:eb0dc4e38e6a1fd579e5d50369aa2e10acfc9cace504579b2faabb478e76941a", size = 149887, upload-time = "2026-03-25T20:21:33.028Z" }, + { url = "https://files.pythonhosted.org/packages/5c/e0/90637574e5e7212c09099c67ad349b04ec4d6020324539297b634a0192b0/tomli-2.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c7f2c7f2b9ca6bdeef8f0fa897f8e05085923eb091721675170254cbc5b02897", size = 243704, upload-time = "2026-03-25T20:21:34.51Z" }, + { url = "https://files.pythonhosted.org/packages/10/8f/d3ddb16c5a4befdf31a23307f72828686ab2096f068eaf56631e136c1fdd/tomli-2.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f3c6818a1a86dd6dca7ddcaaf76947d5ba31aecc28cb1b67009a5877c9a64f3f", size = 251628, upload-time = "2026-03-25T20:21:36.012Z" }, + { url = "https://files.pythonhosted.org/packages/e3/f1/dbeeb9116715abee2485bf0a12d07a8f31af94d71608c171c45f64c0469d/tomli-2.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d312ef37c91508b0ab2cee7da26ec0b3ed2f03ce12bd87a588d771ae15dcf82d", size = 247180, upload-time = "2026-03-25T20:21:37.136Z" }, + { 
url = "https://files.pythonhosted.org/packages/d3/74/16336ffd19ed4da28a70959f92f506233bd7cfc2332b20bdb01591e8b1d1/tomli-2.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51529d40e3ca50046d7606fa99ce3956a617f9b36380da3b7f0dd3dd28e68cb5", size = 251674, upload-time = "2026-03-25T20:21:38.298Z" }, + { url = "https://files.pythonhosted.org/packages/16/f9/229fa3434c590ddf6c0aa9af64d3af4b752540686cace29e6281e3458469/tomli-2.4.1-cp313-cp313-win32.whl", hash = "sha256:2190f2e9dd7508d2a90ded5ed369255980a1bcdd58e52f7fe24b8162bf9fedbd", size = 97976, upload-time = "2026-03-25T20:21:39.316Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1e/71dfd96bcc1c775420cb8befe7a9d35f2e5b1309798f009dca17b7708c1e/tomli-2.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:8d65a2fbf9d2f8352685bc1364177ee3923d6baf5e7f43ea4959d7d8bc326a36", size = 108755, upload-time = "2026-03-25T20:21:40.248Z" }, + { url = "https://files.pythonhosted.org/packages/83/7a/d34f422a021d62420b78f5c538e5b102f62bea616d1d75a13f0a88acb04a/tomli-2.4.1-cp313-cp313-win_arm64.whl", hash = "sha256:4b605484e43cdc43f0954ddae319fb75f04cc10dd80d830540060ee7cd0243cd", size = 95265, upload-time = "2026-03-25T20:21:41.219Z" }, + { url = "https://files.pythonhosted.org/packages/3c/fb/9a5c8d27dbab540869f7c1f8eb0abb3244189ce780ba9cd73f3770662072/tomli-2.4.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:fd0409a3653af6c147209d267a0e4243f0ae46b011aa978b1080359fddc9b6cf", size = 155726, upload-time = "2026-03-25T20:21:42.23Z" }, + { url = "https://files.pythonhosted.org/packages/62/05/d2f816630cc771ad836af54f5001f47a6f611d2d39535364f148b6a92d6b/tomli-2.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a120733b01c45e9a0c34aeef92bf0cf1d56cfe81ed9d47d562f9ed591a9828ac", size = 149859, upload-time = "2026-03-25T20:21:43.386Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/48/66341bdb858ad9bd0ceab5a86f90eddab127cf8b046418009f2125630ecb/tomli-2.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:559db847dc486944896521f68d8190be1c9e719fced785720d2216fe7022b662", size = 244713, upload-time = "2026-03-25T20:21:44.474Z" }, + { url = "https://files.pythonhosted.org/packages/df/6d/c5fad00d82b3c7a3ab6189bd4b10e60466f22cfe8a08a9394185c8a8111c/tomli-2.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01f520d4f53ef97964a240a035ec2a869fe1a37dde002b57ebc4417a27ccd853", size = 252084, upload-time = "2026-03-25T20:21:45.62Z" }, + { url = "https://files.pythonhosted.org/packages/00/71/3a69e86f3eafe8c7a59d008d245888051005bd657760e96d5fbfb0b740c2/tomli-2.4.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7f94b27a62cfad8496c8d2513e1a222dd446f095fca8987fceef261225538a15", size = 247973, upload-time = "2026-03-25T20:21:46.937Z" }, + { url = "https://files.pythonhosted.org/packages/67/50/361e986652847fec4bd5e4a0208752fbe64689c603c7ae5ea7cb16b1c0ca/tomli-2.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ede3e6487c5ef5d28634ba3f31f989030ad6af71edfb0055cbbd14189ff240ba", size = 256223, upload-time = "2026-03-25T20:21:48.467Z" }, + { url = "https://files.pythonhosted.org/packages/8c/9a/b4173689a9203472e5467217e0154b00e260621caa227b6fa01feab16998/tomli-2.4.1-cp314-cp314-win32.whl", hash = "sha256:3d48a93ee1c9b79c04bb38772ee1b64dcf18ff43085896ea460ca8dec96f35f6", size = 98973, upload-time = "2026-03-25T20:21:49.526Z" }, + { url = "https://files.pythonhosted.org/packages/14/58/640ac93bf230cd27d002462c9af0d837779f8773bc03dee06b5835208214/tomli-2.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:88dceee75c2c63af144e456745e10101eb67361050196b0b6af5d717254dddf7", size = 109082, upload-time = "2026-03-25T20:21:50.506Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/2f/702d5e05b227401c1068f0d386d79a589bb12bf64c3d2c72ce0631e3bc49/tomli-2.4.1-cp314-cp314-win_arm64.whl", hash = "sha256:b8c198f8c1805dc42708689ed6864951fd2494f924149d3e4bce7710f8eb5232", size = 96490, upload-time = "2026-03-25T20:21:51.474Z" }, + { url = "https://files.pythonhosted.org/packages/45/4b/b877b05c8ba62927d9865dd980e34a755de541eb65fffba52b4cc495d4d2/tomli-2.4.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:d4d8fe59808a54658fcc0160ecfb1b30f9089906c50b23bcb4c69eddc19ec2b4", size = 164263, upload-time = "2026-03-25T20:21:52.543Z" }, + { url = "https://files.pythonhosted.org/packages/24/79/6ab420d37a270b89f7195dec5448f79400d9e9c1826df982f3f8e97b24fd/tomli-2.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7008df2e7655c495dd12d2a4ad038ff878d4ca4b81fccaf82b714e07eae4402c", size = 160736, upload-time = "2026-03-25T20:21:53.674Z" }, + { url = "https://files.pythonhosted.org/packages/02/e0/3630057d8eb170310785723ed5adcdfb7d50cb7e6455f85ba8a3deed642b/tomli-2.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1d8591993e228b0c930c4bb0db464bdad97b3289fb981255d6c9a41aedc84b2d", size = 270717, upload-time = "2026-03-25T20:21:55.129Z" }, + { url = "https://files.pythonhosted.org/packages/7a/b4/1613716072e544d1a7891f548d8f9ec6ce2faf42ca65acae01d76ea06bb0/tomli-2.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:734e20b57ba95624ecf1841e72b53f6e186355e216e5412de414e3c51e5e3c41", size = 278461, upload-time = "2026-03-25T20:21:56.228Z" }, + { url = "https://files.pythonhosted.org/packages/05/38/30f541baf6a3f6df77b3df16b01ba319221389e2da59427e221ef417ac0c/tomli-2.4.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8a650c2dbafa08d42e51ba0b62740dae4ecb9338eefa093aa5c78ceb546fcd5c", size = 274855, upload-time = "2026-03-25T20:21:57.653Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/a3/ec9dd4fd2c38e98de34223b995a3b34813e6bdadf86c75314c928350ed14/tomli-2.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:504aa796fe0569bb43171066009ead363de03675276d2d121ac1a4572397870f", size = 283144, upload-time = "2026-03-25T20:21:59.089Z" }, + { url = "https://files.pythonhosted.org/packages/ef/be/605a6261cac79fba2ec0c9827e986e00323a1945700969b8ee0b30d85453/tomli-2.4.1-cp314-cp314t-win32.whl", hash = "sha256:b1d22e6e9387bf4739fbe23bfa80e93f6b0373a7f1b96c6227c32bef95a4d7a8", size = 108683, upload-time = "2026-03-25T20:22:00.214Z" }, + { url = "https://files.pythonhosted.org/packages/12/64/da524626d3b9cc40c168a13da8335fe1c51be12c0a63685cc6db7308daae/tomli-2.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2c1c351919aca02858f740c6d33adea0c5deea37f9ecca1cc1ef9e884a619d26", size = 121196, upload-time = "2026-03-25T20:22:01.169Z" }, + { url = "https://files.pythonhosted.org/packages/5a/cd/e80b62269fc78fc36c9af5a6b89c835baa8af28ff5ad28c7028d60860320/tomli-2.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eab21f45c7f66c13f2a9e0e1535309cee140182a9cdae1e041d02e47291e8396", size = 100393, upload-time = "2026-03-25T20:22:02.137Z" }, + { url = "https://files.pythonhosted.org/packages/7b/61/cceae43728b7de99d9b847560c262873a1f6c98202171fd5ed62640b494b/tomli-2.4.1-py3-none-any.whl", hash = "sha256:0d85819802132122da43cb86656f8d1f8c6587d54ae7dcaf30e90533028b49fe", size = 14583, upload-time = "2026-03-25T20:22:03.012Z" }, +] + +[[package]] +name = "tqdm" +version = "4.67.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.42.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e3/ad/4a96c425be6fb67e0621e62d86c402b4a17ab2be7f7c055d9bd2f638b9e2/uvicorn-0.42.0.tar.gz", hash = "sha256:9b1f190ce15a2dd22e7758651d9b6d12df09a13d51ba5bf4fc33c383a48e1775", size = 85393, upload-time = "2026-03-16T06:19:50.077Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/89/f8827ccff89c1586027a105e5630ff6139a64da2515e24dafe860bd9ae4d/uvicorn-0.42.0-py3-none-any.whl", hash = "sha256:96c30f5c7abe6f74ae8900a70e92b85ad6613b745d4879eb9b16ccad15645359", size = 68830, upload-time = "2026-03-16T06:19:48.325Z" }, +]