File size: 33,769 Bytes
25be136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
#!/usr/bin/env python3
import json
from pathlib import Path


# Output path for the generated benchmark cases (JSON Lines).
# NOTE(review): parents[1] assumes this script sits one directory below the
# project root (e.g. scripts/), so OUT resolves to <root>/data/cases.jsonl —
# confirm against the repo layout.
OUT = Path(__file__).resolve().parents[1] / "data" / "cases.jsonl"


def env_cases():
    """Generate environment-swap cases.

    Each case asks for one environment's value of a service; distractor
    entries attribute the sibling environments' values to those environments,
    plus two low-relevance filler entries.

    Returns:
        list[dict]: case dicts with id, query, adversary_type, entries,
        relevant_ids, and notes.
    """
    services = [
        ("database", "db-staging.internal", "db-prod.internal", "db-dev.internal"),
        ("redis cache", "redis-staging.internal", "redis-prod.internal", "redis-dev.internal"),
        ("object storage bucket", "assets-staging", "assets-prod", "assets-dev"),
        ("email sender", "ses-staging", "ses-prod", "ses-dev"),
        ("nats cluster", "nats-staging.internal", "nats-prod.internal", "nats-dev.internal"),
        ("API base URL", "https://api-staging.ecue.ai", "https://api.ecue.ai", "https://api-dev.ecue.ai"),
        ("metrics backend", "vm-staging.internal", "vm-prod.internal", "vm-dev.internal"),
        ("auth issuer", "https://auth-staging.ecue.ai", "https://auth.ecue.ai", "https://auth-dev.ecue.ai"),
        ("artifact bucket", "artifacts-staging", "artifacts-prod", "artifacts-dev"),
        ("search index", "meili-staging.internal", "meili-prod.internal", "meili-dev.internal"),
    ]
    # (target env, primary distractor env, remaining sibling env)
    env_queries = [
        ("staging", "production", "development"),
        ("production", "staging", "development"),
        ("development", "production", "staging"),
    ]
    cases = []
    idx = 1
    for service, a, b, c in services:
        # One consistent env -> value map per service row.
        # BUG FIX: the previous hand-written lookup tables were wrong for the
        # "development" target: `wrong` resolved to the staging value (a)
        # even though the distractor env is production (value b), and
        # `other_val` resolved to the production value (b) even though the
        # "other" env is staging (value a). Indexing a single map by the
        # actual environment name keeps every entry's text truthful for the
        # environment it names.
        env_value = {"staging": a, "production": b, "development": c}
        for target, distractor, other in env_queries:
            correct = env_value[target]
            wrong = env_value[distractor]
            other_val = env_value[other]
            cases.append({
                "id": f"env-{idx:03d}",
                "query": f"What {service} does {target} use?",
                "adversary_type": "environment_swap",
                "entries": [
                    entry("e1", f"The {target} environment uses {correct} for its {service}.", [target, normalize(service), "infra"], 1),
                    entry("e2", f"The {distractor} environment uses {wrong} for its {service}.", [distractor, normalize(service), "infra"], 1),
                    entry("e3", f"The {other} environment uses {other_val} for its {service}.", [other, normalize(service), "infra"], 2),
                    entry("e4", f"The {target} environment rotates credentials weekly for the {service}.", [target, normalize(service), "ops"], 2),
                    entry("e5", f"The {target} email sender is ses-{target} for notification traffic.", [target, "email", "infra"], 3),
                ],
                "relevant_ids": ["e1"],
                "notes": "Environment swap distractors with same service family.",
            })
            idx += 1
    return cases


def env_cases_extra():
    """Build additional environment-swap cases from a flat row table.

    Each row carries the target environment, the service, the correct value,
    the primary-distractor value, and the remaining sibling's value.

    Returns:
        list[dict]: case dicts keyed envx-001 .. envx-020.
    """
    rows = [
        ("staging", "feature flag service", "flags-staging.internal", "flags-prod.internal", "flags-dev.internal"),
        ("production", "feature flag service", "flags-prod.internal", "flags-staging.internal", "flags-dev.internal"),
        ("staging", "artifact registry", "registry-staging.internal", "registry-prod.internal", "registry-dev.internal"),
        ("production", "artifact registry", "registry-prod.internal", "registry-staging.internal", "registry-dev.internal"),
        ("development", "artifact registry", "registry-dev.internal", "registry-prod.internal", "registry-staging.internal"),
        ("staging", "workflow queue", "queue-staging.internal", "queue-prod.internal", "queue-dev.internal"),
        ("production", "workflow queue", "queue-prod.internal", "queue-staging.internal", "queue-dev.internal"),
        ("development", "workflow queue", "queue-dev.internal", "queue-prod.internal", "queue-staging.internal"),
        ("staging", "analytics endpoint", "analytics-staging.internal", "analytics-prod.internal", "analytics-dev.internal"),
        ("production", "analytics endpoint", "analytics-prod.internal", "analytics-staging.internal", "analytics-dev.internal"),
        ("development", "analytics endpoint", "analytics-dev.internal", "analytics-prod.internal", "analytics-staging.internal"),
        ("staging", "auth callback domain", "callback-staging.ecue.ai", "callback.ecue.ai", "callback-dev.ecue.ai"),
        ("production", "auth callback domain", "callback.ecue.ai", "callback-staging.ecue.ai", "callback-dev.ecue.ai"),
        ("development", "auth callback domain", "callback-dev.ecue.ai", "callback.ecue.ai", "callback-staging.ecue.ai"),
        ("staging", "payments webhook host", "payments-staging.internal", "payments-prod.internal", "payments-dev.internal"),
        ("production", "payments webhook host", "payments-prod.internal", "payments-staging.internal", "payments-dev.internal"),
        ("development", "payments webhook host", "payments-dev.internal", "payments-prod.internal", "payments-staging.internal"),
        ("staging", "logs bucket", "logs-staging", "logs-prod", "logs-dev"),
        ("production", "logs bucket", "logs-prod", "logs-staging", "logs-dev"),
        ("development", "logs bucket", "logs-dev", "logs-prod", "logs-staging"),
    ]
    cases = []
    for case_no, (target, service, correct, wrong, other) in enumerate(rows, start=1):
        # Primary distractor is production unless production is the target;
        # the remaining sibling is development unless development is the target.
        distractor = "staging" if target == "production" else "production"
        sibling = "staging" if target == "development" else "development"
        case = {
            "id": f"envx-{case_no:03d}",
            "query": f"What {service} does {target} use?",
            "adversary_type": "environment_swap",
            "entries": [
                entry("e1", f"The {target} environment uses {correct} for the {service}.", [target, normalize(service), "infra"], 1),
                entry("e2", f"The {distractor} environment uses {wrong} for the {service}.", [distractor, normalize(service), "infra"], 1),
                entry("e3", f"The {sibling} environment uses {other} for the {service}.", [sibling, normalize(service), "infra"], 2),
                entry("e4", f"The {target} runbook mentions how to rotate secrets for the {service}.", [target, normalize(service), "runbook"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra environment confusion case.",
        }
        cases.append(case)
    return cases


def entity_cases():
    """Generate entity-swap cases.

    Two near-identical names share the same preference value; the query asks
    about only one of them, so retrieval must not confuse the look-alike.

    Returns:
        list[dict]: case dicts keyed entity-001 .. entity-012.
    """
    pairs = [
        ("Caroline", "Catherine", "oat milk", "black tea"),
        ("Jordan", "Jordyn", "standing desk", "ergonomic chair"),
        ("Alicia", "Alice", "Postgres", "MySQL"),
        ("Mika", "Mila", "vim", "emacs"),
        ("Darren", "Dorian", "dark mode", "light mode"),
        ("Riley", "Ryan", "GraphQL", "REST"),
        ("Marina", "Maria", "daily standups", "weekly status docs"),
        ("Talia", "Tanya", "Go modules", "Bazel workspaces"),
        ("Nolan", "Noah", "CUDA containers", "CPU-only builds"),
        ("Elena", "Alina", "Linear", "Jira"),
        ("Sonia", "Sonya", "dark roast", "green tea"),
        ("Maren", "Karen", "Nix", "Docker Compose"),
    ]
    cases = []
    for case_no, (person, twin, liked, backup) in enumerate(pairs, start=1):
        cases.append({
            "id": f"entity-{case_no:03d}",
            "query": f"What does {person} prefer?",
            "adversary_type": "entity_swap",
            "entries": [
                entry("e1", f"{person} prefers {liked} for daily work.", [normalize(person), "preference"], 1),
                entry("e2", f"{twin} prefers {liked} for daily work.", [normalize(twin), "preference"], 1),
                entry("e3", f"{person} also likes {backup} in some situations.", [normalize(person), "preference"], 2),
                entry("e4", f"The team discussed {liked} and {backup} during planning.", ["team", "planning"], 3),
                entry("e5", f"{person} wrote a migration note about deployment safety.", [normalize(person), "deployment"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Entity swap with near-name collision and shared preference value.",
        })
    return cases


def entity_cases_extra():
    """Build additional entity-swap cases from a flat row table.

    Returns:
        list[dict]: case dicts keyed entityx-001 .. entityx-020.
    """
    rows = [
        ("Colin", "Collin", "sourdough bread", "rice noodles"),
        ("Amira", "Amara", "linear issue tracker", "spreadsheet backlog"),
        ("Devon", "Devin", "kubernetes", "nomad"),
        ("Selena", "Sienna", "elm", "react"),
        ("Tristan", "Tristen", "standing meetings", "async updates"),
        ("Nadia", "Naomi", "matcha", "espresso"),
        ("Harper", "Harold", "tailscale", "wireguard"),
        ("Leona", "Lenora", "postgresql", "sqlite"),
        ("Mason", "Marlon", "weekly demos", "monthly demos"),
        ("Tessa", "Teresa", "dark theme", "solarized light"),
        ("Gideon", "Gillian", "helm charts", "terraform modules"),
        ("Priya", "Priyanka", "quiet keyboards", "clicky keyboards"),
        ("Evan", "Ivan", "gpu builds", "cpu builds"),
        ("Lena", "Lina", "shortbread cookies", "ginger cookies"),
        ("Marco", "Marek", "clickhouse", "bigquery"),
        ("Keira", "Kiera", "rss feeds", "email newsletters"),
        ("Brennan", "Brendan", "obsidian", "logseq"),
        ("Farah", "Fiona", "blueberry yogurt", "plain yogurt"),
        ("Noelle", "Noel", "go test", "pytest"),
        ("Soren", "Sorin", "daily walks", "stationary bike"),
    ]

    def build(n, who, lookalike, liked, backup):
        # One case: the look-alike name shares the same preference value.
        return {
            "id": f"entityx-{n:03d}",
            "query": f"What does {who} prefer?",
            "adversary_type": "entity_swap",
            "entries": [
                entry("e1", f"{who} prefers {liked} for everyday use.", [normalize(who), "preference"], 1),
                entry("e2", f"{lookalike} prefers {liked} for everyday use.", [normalize(lookalike), "preference"], 1),
                entry("e3", f"{who} sometimes uses {backup} instead.", [normalize(who), "preference"], 2),
                entry("e4", f"The team discussed {liked} and {backup} during planning.", ["team", "planning"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra entity confusion case.",
        }

    return [build(n, *row) for n, row in enumerate(rows, 1)]


def time_cases():
    """Generate time-swap cases.

    The same event is reported in two different months; only the entry with
    the correct month is relevant.

    Returns:
        list[dict]: case dicts keyed time-001 .. time-012.
    """
    events = [
        ("migrate auth to bearer tokens", "March", "May"),
        ("split staging and production domains", "April", "June"),
        ("move Redis into a private subnet", "January", "February"),
        ("ship the reporting dashboard", "July", "August"),
        ("enable GPU inference in Docker", "September", "October"),
        ("rotate the SES credentials", "November", "December"),
        ("switch the default embedded model to bge-small", "February", "April"),
        ("publish the public GHCR image", "May", "July"),
        ("replace the handwritten MCP server", "June", "August"),
        ("introduce depth-aware ranking", "October", "December"),
        ("remove the TUI surface", "January", "March"),
        ("generalize the tagger pipeline", "April", "September"),
    ]
    cases = []
    for case_no, (event, actual, decoy) in enumerate(events, start=1):
        cases.append({
            "id": f"time-{case_no:03d}",
            "query": f"When did we {event}?",
            "adversary_type": "time_swap",
            "entries": [
                entry("e1", f"We {event} in {actual}.", ["timeline", normalize(event)], 1, timestamp=f"2026-{month_num(actual)}-03"),
                entry("e2", f"We {event} in {decoy}.", ["timeline", normalize(event)], 1, timestamp=f"2026-{month_num(decoy)}-04"),
                entry("e3", f"We discussed how to {event} throughout {actual} planning.", ["planning", normalize(event)], 2),
                entry("e4", f"We completed documentation for that change in {decoy}.", ["docs", normalize(event)], 2),
            ],
            "relevant_ids": ["e1"],
            "notes": "Same event with swapped month distractor.",
        })
    return cases


def time_cases_extra():
    """Build additional time-swap cases from a flat row table.

    Returns:
        list[dict]: case dicts keyed timex-001 .. timex-020.
    """
    rows = [
        ("publish the first public image", "January", "March"),
        ("rename the graph package to taggraph", "February", "April"),
        ("switch OpenCode to the official MCP SDK", "May", "July"),
        ("remove the terminal UI", "June", "August"),
        ("set bge-small as the default embedded model", "September", "November"),
        ("introduce proper-noun tag extraction", "October", "December"),
        ("generalize the Docker runtime image", "March", "May"),
        ("fix anonymous GHCR pulls", "April", "June"),
        ("drop tier terminology from the interface", "July", "September"),
        ("move from topicgraph to taggraph", "August", "October"),
        ("add benchmark charts", "November", "January"),
        ("publish the adversarial benchmark skeleton", "December", "February"),
        ("introduce depth-aware ranking bias", "January", "March"),
        ("refocus the README benchmark section", "February", "April"),
        ("switch the package image name to generic tagmem", "May", "July"),
        ("replace the shell-wrapper MCP path", "June", "August"),
        ("add scripted ingest integration tests", "September", "November"),
        ("add benchmark raw artifact publishing", "October", "December"),
        ("make the public repo visible", "March", "May"),
        ("publish the refreshed runtime image", "April", "June"),
    ]
    cases = []
    counter = 0
    for event, actual, decoy in rows:
        counter += 1
        cases.append({
            "id": f"timex-{counter:03d}",
            "query": f"When did we {event}?",
            "adversary_type": "time_swap",
            "entries": [
                entry("e1", f"We {event} in {actual}.", [normalize(event), "timeline"], 1, timestamp=f"2026-{month_num(actual)}-03"),
                entry("e2", f"We {event} in {decoy}.", [normalize(event), "timeline"], 1, timestamp=f"2026-{month_num(decoy)}-04"),
                entry("e3", f"We planned to {event} throughout {actual}.", [normalize(event), "planning"], 2),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra time-swap case.",
        })
    return cases


def state_cases():
    """Generate state-update cases.

    A current fact competes with a stale historical value and a sibling
    (preview/staging) value; only the current fact is relevant.

    Returns:
        list[dict]: case dicts keyed state-001 .. state-010.
    """
    facts = [
        ("current production domain", "ecue.ai", "hifidelityai.com", "preview.ecue.ai"),
        ("default embed model", "bge-small-en-v1.5", "all-MiniLM-L6-v2", "bge-base-en-v1.5"),
        ("current staging database", "db-staging.internal", "db-old-staging.internal", "db-preview.internal"),
        ("current metrics backend", "VictoriaMetrics", "Prometheus", "InfluxDB"),
        ("current mail sender", "ses-prod", "ses-old", "ses-preview"),
        ("current OpenCode MCP name", "tagmem", "tagmem_active", "mempalace_active"),
        ("current published image", "ghcr.io/codysnider/tagmem", "ghcr.io/codysnider/tagmem-opencode", "ghcr.io/codysnider/tagmem-preview"),
        ("current local data root", "$HOME/.local/share/tagmem", "/data/tagmem", "/srv/tagmem"),
        ("current vector backend", "chromem-go", "ChromaDB", "SQLite FTS"),
        ("current graph package name", "taggraph", "topicgraph", "memorygraph"),
    ]
    cases = []
    for case_no, (label, current, stale, variant) in enumerate(facts, start=1):
        cases.append({
            "id": f"state-{case_no:03d}",
            "query": f"What is the {label}?",
            "adversary_type": "state_update",
            "entries": [
                entry("e1", f"The {label} is {current}.", [normalize(label), "current"], 0, timestamp="2026-04-01"),
                entry("e2", f"The {label} used to be {stale}.", [normalize(label), "historical"], 2, timestamp="2025-10-01"),
                entry("e3", f"The staging or preview equivalent is {variant}.", [normalize(label), "preview"], 2, timestamp="2026-04-01"),
                entry("e4", f"We updated the runbook after changing the {label}.", [normalize(label), "runbook"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Current fact vs stale fact and sibling environment distractor.",
        })
    return cases


def state_cases_extra():
    """Build additional state-update cases from a flat row table.

    Returns:
        list[dict]: case dicts keyed statex-001 .. statex-020.
    """
    rows = [
        ("current benchmark default model", "bge-small-en-v1.5", "all-MiniLM-L6-v2", "bge-base-en-v1.5"),
        ("current OpenCode MCP name", "tagmem", "tagmem_active", "mempalace_active"),
        ("current Dockerfile name", "Dockerfile.runtime", "Dockerfile.opencode", "Dockerfile.dev"),
        ("current GHCR image", "ghcr.io/codysnider/tagmem", "ghcr.io/codysnider/tagmem-opencode", "ghcr.io/codysnider/tagmem-preview"),
        ("current benchmark data root", "$HOME/.local/share/tagmem", "/data/tagmem", "/srv/tagmem"),
        ("current public MCP prefix", "tagmem_", "tiered_memory_", "memory_"),
        ("current graph package name", "taggraph", "topicgraph", "memorygraph"),
        ("current default acceleration mode", "auto", "cuda", "cpu"),
        ("current install preference", "Docker", "local go build", "manual source edits"),
        ("current CLI primary surface", "CLI and MCP", "TUI and MCP", "TUI only"),
        ("current default runtime image", "tagmem:latest", "tagmem-opencode:latest", "tagmem-dev:latest"),
        ("current published benchmark file path", "benchmarks/raw/bge-small-en-v1.5", "bench-results-live", "results/live"),
        ("current package host", "ghcr.io", "docker.io", "quay.io"),
        ("current repo owner", "codysnider", "lhl", "openai"),
        ("current benchmark category count", "5", "4", "6"),
        ("current benchmark default device", "GPU", "CPU", "TPU"),
        ("current memory grouping model", "tags plus depth", "rooms plus wings", "folders only"),
        ("current benchmark standalone project", "adversarial-memory-bench", "benchmarks", "tagmem-bench"),
        ("current install wrapper root", "~/.local/share/tagmem/install", "~/.local/share/tagmem/opencode-install", "~/.cache/tagmem/install"),
        ("current doctor image behavior", "generic runtime image", "OpenCode-only image", "source-only binary"),
    ]
    return [
        {
            "id": f"statex-{n:03d}",
            "query": f"What is the {label}?",
            "adversary_type": "state_update",
            "entries": [
                entry("e1", f"The {label} is {current}.", [normalize(label), "current"], 0, timestamp="2026-04-10"),
                entry("e2", f"The {label} used to be {old}.", [normalize(label), "historical"], 2, timestamp="2025-12-01"),
                entry("e3", f"A related alternative is {sibling}.", [normalize(label), "alternative"], 2, timestamp="2026-04-10"),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra state-update case.",
        }
        for n, (label, current, old, sibling) in enumerate(rows, 1)
    ]


def speaker_cases():
    """Generate speaker-swap cases.

    The assistant and the user each made a suggestion on the same subject;
    the query asks what the assistant ("you") suggested.

    Returns:
        list[dict]: case dicts keyed speaker-001 .. speaker-010.
    """
    topics = [
        ("Terraform environments", "separate Terraform states for shared, production, and staging", "one Terraform state with workspaces"),
        ("tagging pipeline", "use deterministic extraction first and embedding ranking second", "let the model invent tags directly"),
        ("OpenCode integration", "run the image directly with the mcp subcommand", "wrap everything in a shell pipeline"),
        ("release packaging", "publish ghcr.io/codysnider/tagmem as the generic image", "ship only a local Docker build"),
        ("depth model", "treat depth as a secondary ranking bias", "replace tags with rigid hierarchical folders"),
        ("README benchmarks", "keep a compact benchmark table in the main README", "put the full benchmark suite in the top section"),
        ("Docker data root", "use HOME-local defaults with an env override", "hardcode everything to /data/tagmem"),
        ("MCP compatibility", "use the official Go SDK", "hand-roll every transport detail"),
        ("adversarial benchmark repo", "split it into a standalone project", "bury it inside the main app repo"),
        ("OpenCode command naming", "use remember instead of mine", "keep mine because users will figure it out"),
    ]
    cases = []
    for case_no, (topic, bot_line, human_line) in enumerate(topics, start=1):
        cases.append({
            "id": f"speaker-{case_no:03d}",
            "query": f"What did you suggest for {topic}?",
            "adversary_type": "speaker_swap",
            "entries": [
                entry("e1", f"You suggested {bot_line}.", [normalize(topic), "assistant", "suggestion"], 1, speaker="assistant"),
                entry("e2", f"I suggested {human_line}.", [normalize(topic), "user", "suggestion"], 1, speaker="user"),
                entry("e3", f"We discussed {topic} implementation details later.", [normalize(topic), "discussion"], 2),
                entry("e4", f"The team wrote follow-up notes about {topic}.", [normalize(topic), "notes"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Assistant/user suggestion swap with same subject area.",
        })
    return cases


def speaker_cases_extra():
    """Build additional speaker-swap cases from a flat row table.

    Returns:
        list[dict]: case dicts keyed speakerx-001 .. speakerx-020.
    """
    rows = [
        ("adversarial benchmark design", "use exact relevant ids and plausible distractors", "just score generated answers"),
        ("Docker image naming", "make the image generic, not OpenCode-specific", "keep the image named tagmem-opencode forever"),
        ("OpenCode commands", "rename mine to remember", "keep mine because users already know it"),
        ("tagging pipeline", "use deterministic extraction before model ranking", "let the model invent every tag from scratch"),
        ("README benchmarks", "keep the main README comparison compact", "put every benchmark detail at the top of the README"),
        ("public package visibility", "verify anonymous GHCR pulls from another machine", "assume package visibility from the maintainer machine"),
        ("OpenCode MCP implementation", "use the official Go SDK", "keep hand-rolling the protocol indefinitely"),
        ("TUI removal", "remove it completely for this phase", "leave the broken TUI in place because it might improve later"),
        ("depth model", "treat depth as a ranking bias, not the main organizer", "replace tags with depth-only buckets"),
        ("GPU support", "allow GPU if available and fall back to CPU", "ban GPU because purity matters more than user experience"),
        ("docs cleanup", "remove personal machine paths from public docs", "leave local paths because maintainers can mentally translate them"),
        ("fresh install testing", "test from another machine over SSH", "assume local success implies public install success"),
        ("image publish process", "publish generic ghcr.io/codysnider/tagmem", "ship only a local image and tell users to rebuild it"),
        ("memory terminology", "use entries, tags, depth, facts, diary", "invent new metaphor-heavy nouns"),
        ("public benchmark reporting", "include methodology, machine specs, and raw JSON", "quote one headline number and hide everything else"),
        ("OpenAI-compatible support", "keep it generic rather than naming one specific server", "hard-code it as an Ollama feature"),
        ("CLI install docs", "prefer Docker-first instructions", "assume everyone wants go install first"),
        ("graph naming", "rename topicgraph to taggraph", "keep topicgraph because it is only internal"),
        ("CPU fallback verification", "prove it with real add/search on a remote machine", "just trust the doctor output"),
        ("benchmark expansion", "add adversarial distractor cases as a standalone project", "bury the benchmark data inside the main repo forever"),
    ]

    def build(n, subject, assistant_text, user_text):
        # One case: same subject, two speakers, only the assistant line counts.
        return {
            "id": f"speakerx-{n:03d}",
            "query": f"What did you suggest for {subject}?",
            "adversary_type": "speaker_swap",
            "entries": [
                entry("e1", f"You suggested {assistant_text}.", [normalize(subject), "assistant", "suggestion"], 1, speaker="assistant"),
                entry("e2", f"I suggested {user_text}.", [normalize(subject), "user", "suggestion"], 1, speaker="user"),
                entry("e3", f"We discussed {subject} again in follow-up planning.", [normalize(subject), "discussion"], 2),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra speaker confusion case.",
        }

    return [build(n, *row) for n, row in enumerate(rows, 1)]


def paraphrase_cases():
    """Generate near-duplicate paraphrase cases.

    A correct statement competes with a near-identical statement carrying a
    wrong value, plus a neighboring statement about a different subject.

    Returns:
        list[dict]: case dicts keyed paraphrase-001 .. paraphrase-012.
    """
    items = [
        ("API calls time out after 30 seconds.", "The API timeout is 60 seconds.", "Background jobs retry for up to 30 seconds.", "What timeout do API calls use?"),
        ("Production cookies expire after 12 hours.", "Production cookies expire after 24 hours.", "Staging cookies expire after 12 hours.", "How long do production cookies last?"),
        ("The upload limit is 25 megabytes.", "The upload limit is 50 megabytes.", "The attachment limit is 25 megabytes.", "What is the upload limit?"),
        ("Auth refresh runs every 10 minutes.", "Auth refresh runs every 15 minutes.", "Metrics scraping runs every 10 minutes.", "How often does auth refresh run?"),
        ("The reporting batch runs at 02:00 UTC.", "The reporting batch runs at 03:00 UTC.", "The cleanup batch runs at 02:00 UTC.", "When does the reporting batch run?"),
        ("The public API port is 8443.", "The public API port is 9443.", "The internal admin port is 8443.", "What port does the public API use?"),
        ("The Docker data root defaults to HOME-local storage.", "The Docker data root defaults to /data/tagmem.", "The benchmark root defaults to HOME-local storage.", "What is the Docker data root default?"),
        ("The MCP tools use the tagmem_ prefix.", "The MCP tools use the tiered_memory_ prefix.", "The CLI binary is named tagmem.", "What prefix do the MCP tools use?"),
        ("The default GPU model is bge-small-en-v1.5.", "The default GPU model is bge-base-en-v1.5.", "The CPU fallback model is all-MiniLM-L6-v2.", "What is the default GPU model?"),
        ("The runtime image is ghcr.io/codysnider/tagmem.", "The runtime image is ghcr.io/codysnider/tagmem-opencode.", "The repo path is github.com/codysnider/tagmem.", "What is the runtime image name?"),
        ("The MCP server is implemented with the official Go SDK.", "The MCP server is still handwritten.", "The transport runs over stdio.", "How is the MCP server implemented?"),
        ("The local graph package is named taggraph.", "The local graph package is named topicgraph.", "The benchmark package is named tagbench.", "What is the graph package name?"),
    ]
    cases = []
    for case_no, (truth, decoy, neighbor, question) in enumerate(items, start=1):
        cases.append({
            "id": f"paraphrase-{case_no:03d}",
            "query": question,
            "adversary_type": "near_duplicate_paraphrase",
            "entries": [
                entry("e1", truth, ["config", "limits"], 1),
                entry("e2", decoy, ["config", "limits"], 1),
                entry("e3", neighbor, ["jobs", "limits"], 2),
                entry("e4", f"We reviewed the policy related to: {truth}", ["policy"], 3),
            ],
            "relevant_ids": ["e1"],
            "notes": "Near-duplicate paraphrase with one wrong value distractor.",
        })
    return cases


def paraphrase_cases_extra():
    """Build the extra near-duplicate paraphrase adversarial cases.

    Each row is (correct, wrong, neighbor, query): the correct statement is
    always entry ``e1`` and the sole relevant id, the wrong-value paraphrase
    shares its tags, and the neighbor sits one depth level lower.
    """
    rows = [
        ("The benchmark data root defaults to the home-local tagmem directory.", "The benchmark data root defaults to /data/tagmem.", "The benchmark output root defaults to the home-local tagmem directory.", "What is the benchmark data root default?"),
        ("The embedded model fallback uses the CPU runtime.", "The embedded model fallback aborts when CUDA fails.", "The doctor command reports the current runtime path.", "What happens when CUDA is unavailable?"),
        ("The MCP server runs over stdio.", "The MCP server only runs over HTTP.", "The MCP client uses a local Docker command.", "How does the MCP server communicate?"),
        ("The install wrapper clones the public GitHub repository first.", "The install wrapper only copies a local checkout.", "The runtime wrapper uses the installed repo path.", "What does the install wrapper do first?"),
        ("The README benchmark section now uses a compact comparison table.", "The README benchmark section now embeds the full benchmark methodology.", "Detailed benchmark docs live in the benchmarks folder.", "How is the benchmark section presented in the README?"),
        ("The OpenCode config should launch the generic image with the mcp subcommand.", "The OpenCode config should launch an OpenCode-specific image name.", "The tagmem image is generic and can run doctor or bench too.", "How should OpenCode launch the image?"),
        ("The graph browser should use tags instead of topics.", "The graph browser should keep topic terminology because users expect it.", "The graph package is now named taggraph.", "What should the graph browser use?"),
        ("The public image should be published to ghcr.io/codysnider/tagmem.", "The public image should stay local-only.", "The install flow can fall back to a local build if pull fails.", "Where should the public image be published?"),
        ("The main command with no arguments prints help.", "The main command with no arguments opens a TUI.", "The TUI was removed from the project surface.", "What happens when tagmem runs without a command?"),
        ("The standalone adversarial benchmark should live in its own repository.", "The adversarial benchmark should stay buried under the main repo only.", "The standalone benchmark can later be pushed to Hugging Face.", "Where should the adversarial benchmark live?"),
        ("The improved tagger uses deterministic extraction plus embedding ranking.", "The improved tagger relies entirely on a generative model.", "The improved tagger is aware of proper nouns and code symbols.", "How does the improved tagger work?"),
        ("The default GPU model is bge-small because it is the best overall tradeoff.", "The default GPU model is bge-base because it wins every benchmark decisively.", "MiniLM remains the throughput-first fallback.", "Why is bge-small the default GPU model?"),
        ("The MCP tool names now use the tagmem_ prefix.", "The MCP tool names still use the tiered_memory_ prefix for compatibility.", "The README lists the current tool names explicitly.", "What MCP tool prefix does the project use?"),
        ("The Docker data root can be overridden with TAGMEM_DATA_ROOT.", "The Docker data root is hardcoded to /data/tagmem.", "The default data root lives under the user's home directory.", "How do you override the Docker data root?"),
        ("The doctor command should report the execution device and runtime library.", "The doctor command only reports whether the process started.", "The doctor command is useful for checking CPU fallback.", "What should the doctor command report?"),
        ("The benchmark package should include raw JSON outputs and machine specs.", "The benchmark package should only include a headline score.", "The methodology file lists exact commands and dataset hashes.", "What belongs in the benchmark package?"),
        ("The CLI command for bringing data in is ingest, not mine.", "The CLI command for bringing data in should be called mine because it sounds more creative.", "The naming should stay boring and well-understood.", "What is the correct import command name?"),
        ("Depth is secondary and tags are primary in the memory model.", "Depth should replace tags entirely as the primary organizer.", "Search may still use depth as a bias.", "How do depth and tags relate?"),
        ("The image should be generic enough to run mcp, doctor, or benchmarks.", "The image should be named specifically for OpenCode forever.", "The same image can be reused by multiple agent runtimes.", "Why is the image generic?"),
        ("A fresh-user install test should be run from another machine, not assumed from local success.", "A local maintainer machine is enough to validate public installation.", "Anonymous GHCR pulls are part of the install story.", "How should install validation be done?"),
    ]

    def build(case_num, correct, wrong, neighbor, query):
        # One case per row: the right answer, a wrong-value distractor with
        # identical tags, and a same-domain neighbor one depth level deeper.
        return {
            "id": f"paraphrasex-{case_num:03d}",
            "query": query,
            "adversary_type": "near_duplicate_paraphrase",
            "entries": [
                entry("e1", correct, ["docs", "retrieval"], 1),
                entry("e2", wrong, ["docs", "retrieval"], 1),
                entry("e3", neighbor, ["docs", "context"], 2),
            ],
            "relevant_ids": ["e1"],
            "notes": "Extra near-duplicate paraphrase case.",
        }

    return [build(case_num, *row) for case_num, row in enumerate(rows, 1)]


def entry(entry_id, text, tags, depth, timestamp=None, speaker=None):
    """Build a single benchmark memory-entry dict.

    Args:
        entry_id: Case-local identifier (e.g. "e1").
        text: The entry's body text.
        tags: List of tag strings attached to the entry.
        depth: Integer depth level of the entry.
        timestamp: Optional timestamp; included in the dict when provided.
        speaker: Optional speaker label; included in the dict when provided.

    Returns:
        Dict with "id", "text", "tags", "depth", plus "timestamp"/"speaker"
        when those arguments are given.
    """
    item = {"id": entry_id, "text": text, "tags": tags, "depth": depth}
    # Compare against None explicitly so falsy-but-valid values (0, "")
    # are not silently dropped, as the old truthiness checks did.
    if timestamp is not None:
        item["timestamp"] = timestamp
    if speaker is not None:
        item["speaker"] = speaker
    return item


def normalize(text: str) -> str:
    """Lower-case *text* and convert spaces and slashes to hyphens."""
    # Single translate() pass instead of two chained replace() calls.
    return text.lower().translate(str.maketrans({" ": "-", "/": "-"}))


def month_num(name: str) -> str:
    """Return the zero-padded month number ("01".."12") for an English month name.

    Raises:
        KeyError: If *name* is not a full English month name.
    """
    ordered = (
        "January", "February", "March", "April", "May", "June",
        "July", "August", "September", "October", "November", "December",
    )
    # Derive the zero-padded number from position instead of a hand-written map.
    lookup = {month: f"{num:02d}" for num, month in enumerate(ordered, 1)}
    return lookup[name]


def main() -> int:
    """Collect cases from every builder and write them as JSON Lines to OUT.

    Returns:
        0 on success (used as the process exit code).
    """
    builders = (
        env_cases, env_cases_extra,
        entity_cases, entity_cases_extra,
        time_cases, time_cases_extra,
        state_cases, state_cases_extra,
        speaker_cases, speaker_cases_extra,
        paraphrase_cases, paraphrase_cases_extra,
    )
    # Flatten every builder's output into one ordered list of cases.
    cases = [case for build in builders for case in build()]

    OUT.parent.mkdir(parents=True, exist_ok=True)
    with OUT.open("w") as handle:
        # Compact separators keep each JSONL record as small as possible.
        handle.writelines(
            json.dumps(case, separators=(",", ":")) + "\n" for case in cases
        )

    print(f"wrote {len(cases)} cases to {OUT}")
    return 0


if __name__ == "__main__":
    # Propagate main()'s integer return value as the process exit status.
    raise SystemExit(main())